Merge pull request #84677 from mikedanese/cfssl

remove cfssl dependencies
Kubernetes Prow Robot 2019-11-04 15:28:53 -08:00 committed by GitHub
commit aaa5707896
GPG Key ID: 4AEE18F83AFDEB23
133 changed files with 1552 additions and 24325 deletions

Godeps/LICENSES (generated)

@ -3174,38 +3174,6 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/github.com/cloudflare/cfssl licensed under: =
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/github.com/cloudflare/cfssl/LICENSE 9bd1e7022303d9bbc29fda142f3e4fd0
================================================================================
================================================================================
= vendor/github.com/clusterhq/flocker-go licensed under: =
@ -10263,216 +10231,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/github.com/google/certificate-transparency-go licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/google/certificate-transparency-go/LICENSE 3b83ef96387f14655fc854ddc3c6bd57
================================================================================
================================================================================
= vendor/github.com/google/go-cmp licensed under: =


@ -14,15 +14,14 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/apis/certificates/v1beta1:go_default_library",
+        "//pkg/controller/certificates/authority:go_default_library",
         "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/util/cert:go_default_library",
-        "//vendor/github.com/cloudflare/cfssl/config:go_default_library",
-        "//vendor/github.com/cloudflare/cfssl/signer:go_default_library",
-        "//vendor/github.com/cloudflare/cfssl/signer/local:go_default_library",
     ],
 )


@ -34,16 +34,14 @@ import (
"testing" "testing"
"time" "time"
cfsslconfig "github.com/cloudflare/cfssl/config"
cfsslsigner "github.com/cloudflare/cfssl/signer"
cfssllocal "github.com/cloudflare/cfssl/signer/local"
certapi "k8s.io/api/certificates/v1beta1" certapi "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
restclient "k8s.io/client-go/rest" restclient "k8s.io/client-go/rest"
certutil "k8s.io/client-go/util/cert" certutil "k8s.io/client-go/util/cert"
capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
"k8s.io/kubernetes/pkg/controller/certificates/authority"
) )
// Test_buildClientCertificateManager validates that we can build a local client cert // Test_buildClientCertificateManager validates that we can build a local client cert
@ -297,28 +295,22 @@ func (s *csrSimulator) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	csr = csr.DeepCopy()
 	csr.ResourceVersion = "2"
-	var usages []string
-	for _, usage := range csr.Spec.Usages {
-		usages = append(usages, string(usage))
-	}
-	policy := &cfsslconfig.Signing{
-		Default: &cfsslconfig.SigningProfile{
-			Usage:        usages,
-			Expiry:       time.Hour,
-			ExpiryString: time.Hour.String(),
-			Backdate:     s.backdate,
-		},
-	}
-	cfs, err := cfssllocal.NewSigner(s.serverPrivateKey, s.serverCA, cfsslsigner.DefaultSigAlgo(s.serverPrivateKey), policy)
+	ca := &authority.CertificateAuthority{
+		Certificate: s.serverCA,
+		PrivateKey:  s.serverPrivateKey,
+		Backdate:    s.backdate,
+	}
+	cr, err := capihelper.ParseCSR(csr)
 	if err != nil {
 		t.Fatal(err)
 	}
-	csr.Status.Certificate, err = cfs.Sign(cfsslsigner.SignRequest{
-		Request: string(csr.Spec.Request),
-	})
+	der, err := ca.Sign(cr.Raw, authority.PermissiveSigningPolicy{
+		TTL: time.Hour,
+	})
 	if err != nil {
 		t.Fatal(err)
 	}
+	csr.Status.Certificate = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
 	csr.Status.Conditions = []certapi.CertificateSigningRequestCondition{
 		{Type: certapi.CertificateApproved},
 	}

go.mod

@ -29,7 +29,6 @@ require (
 	github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c
 	github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b // indirect
 	github.com/client9/misspell v0.3.4
-	github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf
 	github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313
 	github.com/codegangsta/negroni v1.0.0 // indirect
 	github.com/container-storage-interface/spec v1.1.0
@ -66,7 +65,6 @@ require (
 	github.com/golang/mock v1.2.0
 	github.com/golang/protobuf v1.3.2
 	github.com/google/cadvisor v0.34.0
-	github.com/google/certificate-transparency-go v1.0.21 // indirect
 	github.com/google/go-cmp v0.3.0
 	github.com/google/gofuzz v1.0.0
 	github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d
@ -215,7 +213,6 @@ replace (
 	github.com/checkpoint-restore/go-criu => github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
 	github.com/cheekybits/genny => github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9
 	github.com/client9/misspell => github.com/client9/misspell v0.3.4
-	github.com/cloudflare/cfssl => github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf
 	github.com/clusterhq/flocker-go => github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313
 	github.com/cockroachdb/datadriven => github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa
 	github.com/codegangsta/negroni => github.com/codegangsta/negroni v1.0.0
@ -285,7 +282,6 @@ replace (
 	github.com/golangplus/testing => github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e
 	github.com/google/btree => github.com/google/btree v1.0.0
 	github.com/google/cadvisor => github.com/google/cadvisor v0.34.0
-	github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.21
 	github.com/google/go-cmp => github.com/google/go-cmp v0.3.0
 	github.com/google/gofuzz => github.com/google/gofuzz v1.0.0
 	github.com/google/martian => github.com/google/martian v2.1.0+incompatible

go.sum

@ -78,8 +78,6 @@ github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod
 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
 github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf h1:eOyFuj3h/Vj5e4voOM16NNrHsUR3jhD0duh76LHMj6Y=
-github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
 github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 h1:eIHD9GNM3Hp7kcRW5mvcz7WTR3ETeoYYKwpgA04kaXE=
 github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
@ -209,8 +207,6 @@ github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/cadvisor v0.34.0 h1:No7G6U/TasplR9uNqyc5Jj0Bet5VSYsK5xLygOf4pUw=
 github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48=
-github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=
-github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
 github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=


@ -118,6 +118,7 @@
     {
       "SelectorRegexp": "k8s[.]io/client-go/",
       "AllowedPrefixes": [
+        "k8s.io/client-go/util/keyutil",
         "k8s.io/client-go/discovery",
         "k8s.io/client-go/dynamic",
         "k8s.io/client-go/informers",
@ -146,9 +147,9 @@
         "k8s.io/client-go/listers/autoscaling/v1",
         "k8s.io/client-go/listers/batch/v1",
         "k8s.io/client-go/listers/certificates/v1beta1",
+        "k8s.io/client-go/listers/coordination/v1",
         "k8s.io/client-go/listers/core/v1",
         "k8s.io/client-go/listers/discovery/v1alpha1",
-        "k8s.io/client-go/listers/coordination/v1",
         "k8s.io/client-go/listers/extensions/v1beta1",
         "k8s.io/client-go/listers/policy/v1beta1",
         "k8s.io/client-go/listers/rbac/v1",
@ -164,12 +165,12 @@
         "k8s.io/client-go/tools/record",
         "k8s.io/client-go/tools/reference",
         "k8s.io/client-go/tools/watch",
+        "k8s.io/client-go/transport",
         "k8s.io/client-go/util/cert",
         "k8s.io/client-go/util/flowcontrol",
         "k8s.io/client-go/util/retry",
-        "k8s.io/client-go/util/workqueue",
         "k8s.io/client-go/util/testing",
-        "k8s.io/client-go/transport"
+        "k8s.io/client-go/util/workqueue"
       ]
     },
     {


@ -42,6 +42,7 @@ filegroup(
     srcs = [
         ":package-srcs",
         "//pkg/controller/certificates/approver:all-srcs",
+        "//pkg/controller/certificates/authority:all-srcs",
         "//pkg/controller/certificates/cleaner:all-srcs",
         "//pkg/controller/certificates/rootcacertpublisher:all-srcs",
         "//pkg/controller/certificates/signer:all-srcs",


@ -27,7 +27,7 @@ import (
capi "k8s.io/api/certificates/v1beta1" capi "k8s.io/api/certificates/v1beta1"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1" certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
k8s_certificates_v1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
"k8s.io/kubernetes/pkg/controller/certificates" "k8s.io/kubernetes/pkg/controller/certificates"
) )
@ -79,7 +79,7 @@ func (a *sarApprover) handle(csr *capi.CertificateSigningRequest) error {
if approved, denied := certificates.GetCertApprovalCondition(&csr.Status); approved || denied { if approved, denied := certificates.GetCertApprovalCondition(&csr.Status); approved || denied {
return nil return nil
} }
x509cr, err := k8s_certificates_v1beta1.ParseCSR(csr) x509cr, err := capihelper.ParseCSR(csr)
if err != nil { if err != nil {
return fmt.Errorf("unable to parse csr %q: %v", csr.Name, err) return fmt.Errorf("unable to parse csr %q: %v", csr.Name, err)
} }
@ -173,7 +173,7 @@ func isNodeClientCert(csr *capi.CertificateSigningRequest, x509cr *x509.Certific
if !reflect.DeepEqual([]string{"system:nodes"}, x509cr.Subject.Organization) { if !reflect.DeepEqual([]string{"system:nodes"}, x509cr.Subject.Organization) {
return false return false
} }
if (len(x509cr.DNSNames) > 0) || (len(x509cr.EmailAddresses) > 0) || (len(x509cr.IPAddresses) > 0) { if len(x509cr.DNSNames) > 0 || len(x509cr.EmailAddresses) > 0 || len(x509cr.IPAddresses) > 0 || len(x509cr.URIs) > 0 {
return false return false
} }
if !hasExactUsages(csr, kubeletClientUsages) { if !hasExactUsages(csr, kubeletClientUsages) {


@ -0,0 +1,47 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"authority.go",
"policies.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/certificates/authority",
visibility = [
# This cmd/kubelet dependency is used for testing. We should migrate
# that test to an integration test that uses the certificates
# controller, and remove the dependency.
"//cmd/kubelet/app:__pkg__",
"//pkg/controller/certificates:__subpackages__",
],
deps = ["//staging/src/k8s.io/api/certificates/v1beta1:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = [
"authority_test.go",
"policies_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
"//vendor/github.com/google/go-cmp/cmp/cmpopts:go_default_library",
],
)


@ -0,0 +1,94 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package authority
import (
"crypto"
"crypto/rand"
"crypto/x509"
"fmt"
"math/big"
"time"
)
var serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128)
// CertificateAuthority implements a certificate authority that supports policy
// based signing. It's used by the signing controller.
type CertificateAuthority struct {
Certificate *x509.Certificate
PrivateKey crypto.Signer
Backdate time.Duration
Now func() time.Time
}
// Sign signs a certificate request, applying a SigningPolicy and returns a DER
// encoded x509 certificate.
func (ca *CertificateAuthority) Sign(crDER []byte, policy SigningPolicy) ([]byte, error) {
now := time.Now()
if ca.Now != nil {
now = ca.Now()
}
nbf := now.Add(-ca.Backdate)
if !nbf.Before(ca.Certificate.NotAfter) {
return nil, fmt.Errorf("the signer has expired: NotAfter=%v", ca.Certificate.NotAfter)
}
cr, err := x509.ParseCertificateRequest(crDER)
if err != nil {
return nil, fmt.Errorf("unable to parse certificate request: %v", err)
}
if err := cr.CheckSignature(); err != nil {
return nil, fmt.Errorf("unable to verify certificate request signature: %v", err)
}
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, fmt.Errorf("unable to generate a serial number for %s: %v", cr.Subject.CommonName, err)
}
tmpl := &x509.Certificate{
SerialNumber: serialNumber,
Subject: cr.Subject,
DNSNames: cr.DNSNames,
IPAddresses: cr.IPAddresses,
EmailAddresses: cr.EmailAddresses,
URIs: cr.URIs,
PublicKeyAlgorithm: cr.PublicKeyAlgorithm,
PublicKey: cr.PublicKey,
Extensions: cr.Extensions,
ExtraExtensions: cr.ExtraExtensions,
NotBefore: nbf,
}
if err := policy.apply(tmpl); err != nil {
return nil, err
}
if !tmpl.NotAfter.Before(ca.Certificate.NotAfter) {
tmpl.NotAfter = ca.Certificate.NotAfter
}
if !now.Before(ca.Certificate.NotAfter) {
return nil, fmt.Errorf("refusing to sign a certificate that expired in the past")
}
der, err := x509.CreateCertificate(rand.Reader, tmpl, ca.Certificate, cr.PublicKey, ca.PrivateKey)
if err != nil {
return nil, fmt.Errorf("failed to sign certificate: %v", err)
}
return der, nil
}
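For orientation, a minimal usage sketch of the new authority package (not part of this diff; the package main wrapper, the self-signed CA, and the example subject are illustrative assumptions). It follows the same pattern used by the kubelet transport test above and the rewritten signer below:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"

	capi "k8s.io/api/certificates/v1beta1"
	"k8s.io/kubernetes/pkg/controller/certificates/authority"
)

func main() {
	// Illustrative self-signed CA; a real controller loads the CA cert and key
	// from disk (see newSigner below). Setup errors are ignored for brevity.
	caKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	caTmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example-ca"},
		NotBefore:             time.Now().Add(-time.Hour),
		NotAfter:              time.Now().Add(24 * time.Hour),
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	caDER, _ := x509.CreateCertificate(rand.Reader, caTmpl, caTmpl, caKey.Public(), caKey)
	caCert, _ := x509.ParseCertificate(caDER)

	// A DER-encoded CSR, normally taken from CertificateSigningRequest.Spec.Request.
	clientKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	csrDER, _ := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
		Subject: pkix.Name{CommonName: "system:node:example", Organization: []string{"system:nodes"}},
	}, clientKey)

	ca := &authority.CertificateAuthority{
		Certificate: caCert,
		PrivateKey:  caKey,
		Backdate:    5 * time.Minute, // same backdate newSigner configures below
	}
	der, err := ca.Sign(csrDER, authority.PermissiveSigningPolicy{
		TTL:    time.Hour,
		Usages: []capi.KeyUsage{capi.UsageDigitalSignature, capi.UsageKeyEncipherment, capi.UsageClientAuth},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})))
}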


@ -0,0 +1,199 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package authority
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"math/big"
"net/url"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
capi "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/util/diff"
)
func TestCertificateAuthority(t *testing.T) {
caKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
if err != nil {
t.Fatal(err)
}
now := time.Now()
tmpl := &x509.Certificate{
SerialNumber: big.NewInt(42),
Subject: pkix.Name{
CommonName: "test-ca",
},
NotBefore: now.Add(-24 * time.Hour),
NotAfter: now.Add(24 * time.Hour),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
}
der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, caKey.Public(), caKey)
if err != nil {
t.Fatal(err)
}
caCert, err := x509.ParseCertificate(der)
if err != nil {
t.Fatal(err)
}
uri, err := url.Parse("help://me@what:8080/where/when?why=true")
if err != nil {
t.Fatal(err)
}
tests := []struct {
name string
cr x509.CertificateRequest
backdate time.Duration
policy SigningPolicy
want x509.Certificate
wantErr bool
}{
{
name: "ca info",
policy: PermissiveSigningPolicy{TTL: time.Hour},
want: x509.Certificate{
Issuer: caCert.Subject,
AuthorityKeyId: caCert.SubjectKeyId,
NotBefore: now,
NotAfter: now.Add(1 * time.Hour),
BasicConstraintsValid: true,
},
},
{
name: "key usage",
policy: PermissiveSigningPolicy{TTL: time.Hour, Usages: []capi.KeyUsage{"signing"}},
want: x509.Certificate{
NotBefore: now,
NotAfter: now.Add(1 * time.Hour),
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageDigitalSignature,
},
},
{
name: "ext key usage",
policy: PermissiveSigningPolicy{TTL: time.Hour, Usages: []capi.KeyUsage{"client auth"}},
want: x509.Certificate{
NotBefore: now,
NotAfter: now.Add(1 * time.Hour),
BasicConstraintsValid: true,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
},
},
{
name: "backdate",
policy: PermissiveSigningPolicy{TTL: time.Hour},
backdate: 5 * time.Minute,
want: x509.Certificate{
NotBefore: now.Add(-5 * time.Minute),
NotAfter: now.Add(55 * time.Minute),
BasicConstraintsValid: true,
},
},
{
name: "truncate expiration",
policy: PermissiveSigningPolicy{TTL: 48 * time.Hour},
want: x509.Certificate{
NotBefore: now,
NotAfter: now.Add(24 * time.Hour),
BasicConstraintsValid: true,
},
},
{
name: "uri sans",
policy: PermissiveSigningPolicy{TTL: time.Hour},
cr: x509.CertificateRequest{
URIs: []*url.URL{uri},
},
want: x509.Certificate{
URIs: []*url.URL{uri},
NotBefore: now,
NotAfter: now.Add(1 * time.Hour),
BasicConstraintsValid: true,
},
},
}
crKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
if err != nil {
t.Fatal(err)
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ca := &CertificateAuthority{
Certificate: caCert,
PrivateKey: caKey,
Now: func() time.Time {
return now
},
Backdate: test.backdate,
}
csr, err := x509.CreateCertificateRequest(rand.Reader, &test.cr, crKey)
if err != nil {
t.Fatal(err)
}
certDER, err := ca.Sign(csr, test.policy)
if err != nil {
t.Fatal(err)
}
if test.wantErr {
if err == nil {
t.Fatal("expected error")
}
return
}
cert, err := x509.ParseCertificate(certDER)
if err != nil {
t.Fatal(err)
}
opts := cmp.Options{
cmpopts.IgnoreFields(x509.Certificate{},
"SignatureAlgorithm",
"PublicKeyAlgorithm",
"Version",
"MaxPathLen",
),
diff.IgnoreUnset(),
cmp.Transformer("RoundTime", func(x time.Time) time.Time {
return x.Truncate(time.Second)
}),
cmp.Comparer(func(x, y *url.URL) bool {
return ((x == nil) && (y == nil)) || x.String() == y.String()
}),
}
if !cmp.Equal(*cert, test.want, opts) {
t.Errorf("unexpected diff: %v", cmp.Diff(*cert, test.want, opts))
}
})
}
}


@ -0,0 +1,142 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package authority
import (
"crypto/x509"
"fmt"
"sort"
"time"
capi "k8s.io/api/certificates/v1beta1"
)
// SigningPolicy validates a CertificateRequest before it's signed by the
// CertificateAuthority. It may default or otherwise mutate a certificate
// template.
type SigningPolicy interface {
// not-exporting apply forces signing policy implementations to be internal
// to this package.
apply(template *x509.Certificate) error
}
// PermissiveSigningPolicy is the signing policy historically used by the local
// signer.
//
// * It forwards all SANs from the original signing request.
// * It sets allowed usages as configured in the policy.
// * It sets NotAfter based on the TTL configured in the policy.
// * It zeros all extensions.
// * It sets BasicConstraints to true.
// * It sets IsCA to false.
type PermissiveSigningPolicy struct {
// TTL is the certificate TTL. It's used to calculate the NotAfter value of
// the certificate.
TTL time.Duration
// Usages are the allowed usages of a certificate.
Usages []capi.KeyUsage
}
func (p PermissiveSigningPolicy) apply(tmpl *x509.Certificate) error {
usage, extUsages, err := keyUsagesFromStrings(p.Usages)
if err != nil {
return err
}
tmpl.KeyUsage = usage
tmpl.ExtKeyUsage = extUsages
tmpl.NotAfter = tmpl.NotBefore.Add(p.TTL)
tmpl.ExtraExtensions = nil
tmpl.Extensions = nil
tmpl.BasicConstraintsValid = true
tmpl.IsCA = false
return nil
}
var keyUsageDict = map[capi.KeyUsage]x509.KeyUsage{
capi.UsageSigning: x509.KeyUsageDigitalSignature,
capi.UsageDigitalSignature: x509.KeyUsageDigitalSignature,
capi.UsageContentCommitment: x509.KeyUsageContentCommitment,
capi.UsageKeyEncipherment: x509.KeyUsageKeyEncipherment,
capi.UsageKeyAgreement: x509.KeyUsageKeyAgreement,
capi.UsageDataEncipherment: x509.KeyUsageDataEncipherment,
capi.UsageCertSign: x509.KeyUsageCertSign,
capi.UsageCRLSign: x509.KeyUsageCRLSign,
capi.UsageEncipherOnly: x509.KeyUsageEncipherOnly,
capi.UsageDecipherOnly: x509.KeyUsageDecipherOnly,
}
var extKeyUsageDict = map[capi.KeyUsage]x509.ExtKeyUsage{
capi.UsageAny: x509.ExtKeyUsageAny,
capi.UsageServerAuth: x509.ExtKeyUsageServerAuth,
capi.UsageClientAuth: x509.ExtKeyUsageClientAuth,
capi.UsageCodeSigning: x509.ExtKeyUsageCodeSigning,
capi.UsageEmailProtection: x509.ExtKeyUsageEmailProtection,
capi.UsageSMIME: x509.ExtKeyUsageEmailProtection,
capi.UsageIPsecEndSystem: x509.ExtKeyUsageIPSECEndSystem,
capi.UsageIPsecTunnel: x509.ExtKeyUsageIPSECTunnel,
capi.UsageIPsecUser: x509.ExtKeyUsageIPSECUser,
capi.UsageTimestamping: x509.ExtKeyUsageTimeStamping,
capi.UsageOCSPSigning: x509.ExtKeyUsageOCSPSigning,
capi.UsageMicrosoftSGC: x509.ExtKeyUsageMicrosoftServerGatedCrypto,
capi.UsageNetscapeSGC: x509.ExtKeyUsageNetscapeServerGatedCrypto,
}
// keyUsagesFromStrings will translate a slice of usage strings from the
// certificates API ("pkg/apis/certificates".KeyUsage) to x509.KeyUsage and
// x509.ExtKeyUsage types.
func keyUsagesFromStrings(usages []capi.KeyUsage) (x509.KeyUsage, []x509.ExtKeyUsage, error) {
var keyUsage x509.KeyUsage
var unrecognized []capi.KeyUsage
extKeyUsages := make(map[x509.ExtKeyUsage]struct{})
for _, usage := range usages {
if val, ok := keyUsageDict[usage]; ok {
keyUsage |= val
} else if val, ok := extKeyUsageDict[usage]; ok {
extKeyUsages[val] = struct{}{}
} else {
unrecognized = append(unrecognized, usage)
}
}
var sorted sortedExtKeyUsage
for eku := range extKeyUsages {
sorted = append(sorted, eku)
}
sort.Sort(sorted)
if len(unrecognized) > 0 {
return 0, nil, fmt.Errorf("unrecognized usage values: %q", unrecognized)
}
return keyUsage, []x509.ExtKeyUsage(sorted), nil
}
type sortedExtKeyUsage []x509.ExtKeyUsage
func (s sortedExtKeyUsage) Len() int {
return len(s)
}
func (s sortedExtKeyUsage) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s sortedExtKeyUsage) Less(i, j int) bool {
return s[i] < s[j]
}
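As a quick worked illustration (standalone sketch, not part of this diff): for the usages the kubelet test CSR requests, keyUsagesFromStrings ORs the KeyUsage values into one bitmask and collects the extended usages into a sorted slice, roughly:

package main

import (
	"crypto/x509"
	"fmt"
)

func main() {
	// For the CSR usages ["signing", "key encipherment", "server auth", "client auth"],
	// the translation maps above produce one combined KeyUsage bitmask plus the
	// extended key usages, sorted by their numeric x509 values.
	ku := x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment
	eku := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
	fmt.Printf("KeyUsage=%b ExtKeyUsage=%v\n", ku, eku)
}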


@ -0,0 +1,93 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package authority
import (
"crypto/x509"
"fmt"
"reflect"
"testing"
capi "k8s.io/api/certificates/v1beta1"
)
func TestKeyUsagesFromStrings(t *testing.T) {
testcases := []struct {
usages []capi.KeyUsage
expectedKeyUsage x509.KeyUsage
expectedExtKeyUsage []x509.ExtKeyUsage
expectErr bool
}{
{
usages: []capi.KeyUsage{"signing"},
expectedKeyUsage: x509.KeyUsageDigitalSignature,
expectedExtKeyUsage: nil,
expectErr: false,
},
{
usages: []capi.KeyUsage{"client auth"},
expectedKeyUsage: 0,
expectedExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
expectErr: false,
},
{
usages: []capi.KeyUsage{"client auth", "client auth"},
expectedKeyUsage: 0,
expectedExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
expectErr: false,
},
{
usages: []capi.KeyUsage{"cert sign", "encipher only"},
expectedKeyUsage: x509.KeyUsageCertSign | x509.KeyUsageEncipherOnly,
expectedExtKeyUsage: nil,
expectErr: false,
},
{
usages: []capi.KeyUsage{"ocsp signing", "crl sign", "s/mime", "content commitment"},
expectedKeyUsage: x509.KeyUsageCRLSign | x509.KeyUsageContentCommitment,
expectedExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageEmailProtection, x509.ExtKeyUsageOCSPSigning},
expectErr: false,
},
{
usages: []capi.KeyUsage{"unsupported string"},
expectedKeyUsage: 0,
expectedExtKeyUsage: nil,
expectErr: true,
},
}
for _, tc := range testcases {
t.Run(fmt.Sprint(tc.usages), func(t *testing.T) {
ku, eku, err := keyUsagesFromStrings(tc.usages)
if tc.expectErr {
if err == nil {
t.Errorf("did not return an error, but expected one")
}
return
}
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if ku != tc.expectedKeyUsage || !reflect.DeepEqual(eku, tc.expectedExtKeyUsage) {
t.Errorf("got=(%v, %v), want=(%v, %v)", ku, eku, tc.expectedKeyUsage, tc.expectedExtKeyUsage)
}
})
}
}


@ -23,7 +23,6 @@ import (
"time" "time"
"golang.org/x/time/rate" "golang.org/x/time/rate"
"k8s.io/klog"
certificates "k8s.io/api/certificates/v1beta1" certificates "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
@ -36,6 +35,7 @@ import (
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
) )


@ -17,10 +17,12 @@ limitations under the License.
 package certificates

 import (
-	"github.com/stretchr/testify/assert"
-	"k8s.io/api/certificates/v1beta1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"k8s.io/api/certificates/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func TestIsCertificateRequestApproved(t *testing.T) {


@ -20,7 +20,7 @@ import (
"reflect" "reflect"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/diff"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"


@ -8,7 +8,7 @@ load(
 go_test(
     name = "go_default_test",
-    srcs = ["cfssl_signer_test.go"],
+    srcs = ["signer_test.go"],
     data = [
         "testdata/ca.crt",
         "testdata/ca.key",
@ -17,23 +17,26 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
         "//staging/src/k8s.io/client-go/util/cert:go_default_library",
+        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
     ],
 )

 go_library(
     name = "go_default_library",
-    srcs = ["cfssl_signer.go"],
+    srcs = ["signer.go"],
     importpath = "k8s.io/kubernetes/pkg/controller/certificates/signer",
     deps = [
+        "//pkg/apis/certificates/v1beta1:go_default_library",
         "//pkg/controller/certificates:go_default_library",
+        "//pkg/controller/certificates/authority:go_default_library",
         "//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/github.com/cloudflare/cfssl/config:go_default_library",
-        "//vendor/github.com/cloudflare/cfssl/helpers:go_default_library",
-        "//vendor/github.com/cloudflare/cfssl/signer:go_default_library",
-        "//vendor/github.com/cloudflare/cfssl/signer/local:go_default_library",
+        "//staging/src/k8s.io/client-go/util/cert:go_default_library",
+        "//staging/src/k8s.io/client-go/util/keyutil:go_default_library",
     ],
 )


@ -1,153 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package signer implements a CA signer that uses keys stored on local disk.
package signer
import (
"crypto"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"time"
capi "k8s.io/api/certificates/v1beta1"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/controller/certificates"
"github.com/cloudflare/cfssl/config"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/signer"
"github.com/cloudflare/cfssl/signer/local"
)
func NewCSRSigningController(
client clientset.Interface,
csrInformer certificatesinformers.CertificateSigningRequestInformer,
caFile, caKeyFile string,
certificateDuration time.Duration,
) (*certificates.CertificateController, error) {
signer, err := newCFSSLSigner(caFile, caKeyFile, client, certificateDuration)
if err != nil {
return nil, err
}
return certificates.NewCertificateController(
"csrsigning",
client,
csrInformer,
signer.handle,
), nil
}
type cfsslSigner struct {
ca *x509.Certificate
priv crypto.Signer
sigAlgo x509.SignatureAlgorithm
client clientset.Interface
certificateDuration time.Duration
// nowFn returns the current time. We have it here for unit testing
nowFn func() time.Time
}
func newCFSSLSigner(caFile, caKeyFile string, client clientset.Interface, certificateDuration time.Duration) (*cfsslSigner, error) {
ca, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("error reading CA cert file %q: %v", caFile, err)
}
cakey, err := ioutil.ReadFile(caKeyFile)
if err != nil {
return nil, fmt.Errorf("error reading CA key file %q: %v", caKeyFile, err)
}
parsedCa, err := helpers.ParseCertificatePEM(ca)
if err != nil {
return nil, fmt.Errorf("error parsing CA cert file %q: %v", caFile, err)
}
strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
password := []byte(strPassword)
if strPassword == "" {
password = nil
}
priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
if err != nil {
return nil, fmt.Errorf("malformed private key %v", err)
}
return &cfsslSigner{
priv: priv,
ca: parsedCa,
sigAlgo: signer.DefaultSigAlgo(priv),
client: client,
certificateDuration: certificateDuration,
nowFn: time.Now,
}, nil
}
func (s *cfsslSigner) handle(csr *capi.CertificateSigningRequest) error {
if !certificates.IsCertificateRequestApproved(csr) {
return nil
}
csr, err := s.sign(csr)
if err != nil {
return fmt.Errorf("error auto signing csr: %v", err)
}
_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr)
if err != nil {
return fmt.Errorf("error updating signature for csr: %v", err)
}
return nil
}
func (s *cfsslSigner) sign(csr *capi.CertificateSigningRequest) (*capi.CertificateSigningRequest, error) {
var usages []string
for _, usage := range csr.Spec.Usages {
usages = append(usages, string(usage))
}
certExpiryDuration := s.certificateDuration
durationUntilExpiry := s.ca.NotAfter.Sub(s.nowFn())
if durationUntilExpiry <= 0 {
return nil, fmt.Errorf("the signer has expired: %v", s.ca.NotAfter)
}
if durationUntilExpiry < certExpiryDuration {
certExpiryDuration = durationUntilExpiry
}
policy := &config.Signing{
Default: &config.SigningProfile{
Usage: usages,
Expiry: certExpiryDuration,
ExpiryString: certExpiryDuration.String(),
},
}
cfs, err := local.NewSigner(s.priv, s.ca, s.sigAlgo, policy)
if err != nil {
return nil, err
}
csr.Status.Certificate, err = cfs.Sign(signer.SignRequest{
Request: string(csr.Spec.Request),
})
if err != nil {
return nil, err
}
return csr, nil
}


@ -1,195 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package signer
import (
"crypto/x509"
"io/ioutil"
"reflect"
"strings"
"testing"
"time"
capi "k8s.io/api/certificates/v1beta1"
"k8s.io/client-go/util/cert"
)
func TestSigner(t *testing.T) {
testNow := time.Now()
testNowFn := func() time.Time {
return testNow
}
s, err := newCFSSLSigner("./testdata/ca.crt", "./testdata/ca.key", nil, 1*time.Hour)
if err != nil {
t.Fatalf("failed to create signer: %v", err)
}
s.nowFn = testNowFn
csrb, err := ioutil.ReadFile("./testdata/kubelet.csr")
if err != nil {
t.Fatalf("failed to read CSR: %v", err)
}
csr := &capi.CertificateSigningRequest{
Spec: capi.CertificateSigningRequestSpec{
Request: []byte(csrb),
Usages: []capi.KeyUsage{
capi.UsageSigning,
capi.UsageKeyEncipherment,
capi.UsageServerAuth,
capi.UsageClientAuth,
},
},
}
csr, err = s.sign(csr)
if err != nil {
t.Fatalf("failed to sign CSR: %v", err)
}
certData := csr.Status.Certificate
if len(certData) == 0 {
t.Fatalf("expected a certificate after signing")
}
certs, err := cert.ParseCertsPEM(certData)
if err != nil {
t.Fatalf("failed to parse certificate: %v", err)
}
if len(certs) != 1 {
t.Fatalf("expected one certificate")
}
crt := certs[0]
if crt.Subject.CommonName != "system:node:k-a-node-s36b" {
t.Errorf("expected common name of 'system:node:k-a-node-s36b', but got: %v", certs[0].Subject.CommonName)
}
if !reflect.DeepEqual(crt.Subject.Organization, []string{"system:nodes"}) {
t.Errorf("expected organization to be [system:nodes] but got: %v", crt.Subject.Organization)
}
if crt.KeyUsage != x509.KeyUsageDigitalSignature|x509.KeyUsageKeyEncipherment {
t.Errorf("bad key usage")
}
if !reflect.DeepEqual(crt.ExtKeyUsage, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}) {
t.Errorf("bad extended key usage")
}
expectedTime := testNow.Add(1 * time.Hour)
// there is some jitter that we need to tolerate
diff := expectedTime.Sub(crt.NotAfter)
if diff > 10*time.Minute || diff < -10*time.Minute {
t.Fatal(crt.NotAfter)
}
}
func TestSignerExpired(t *testing.T) {
hundredYearsFromNowFn := func() time.Time {
return time.Now().Add(24 * time.Hour * 365 * 100)
}
s, err := newCFSSLSigner("./testdata/ca.crt", "./testdata/ca.key", nil, 1*time.Hour)
if err != nil {
t.Fatalf("failed to create signer: %v", err)
}
s.nowFn = hundredYearsFromNowFn
csrb, err := ioutil.ReadFile("./testdata/kubelet.csr")
if err != nil {
t.Fatalf("failed to read CSR: %v", err)
}
csr := &capi.CertificateSigningRequest{
Spec: capi.CertificateSigningRequestSpec{
Request: []byte(csrb),
Usages: []capi.KeyUsage{
capi.UsageSigning,
capi.UsageKeyEncipherment,
capi.UsageServerAuth,
capi.UsageClientAuth,
},
},
}
_, err = s.sign(csr)
if err == nil {
t.Fatal("missing error")
}
if !strings.HasPrefix(err.Error(), "the signer has expired") {
t.Fatal(err)
}
}
func TestDurationLongerThanExpiry(t *testing.T) {
testNow := time.Now()
testNowFn := func() time.Time {
return testNow
}
hundredYears := 24 * time.Hour * 365 * 100
s, err := newCFSSLSigner("./testdata/ca.crt", "./testdata/ca.key", nil, hundredYears)
if err != nil {
t.Fatalf("failed to create signer: %v", err)
}
s.nowFn = testNowFn
csrb, err := ioutil.ReadFile("./testdata/kubelet.csr")
if err != nil {
t.Fatalf("failed to read CSR: %v", err)
}
csr := &capi.CertificateSigningRequest{
Spec: capi.CertificateSigningRequestSpec{
Request: []byte(csrb),
Usages: []capi.KeyUsage{
capi.UsageSigning,
capi.UsageKeyEncipherment,
capi.UsageServerAuth,
capi.UsageClientAuth,
},
},
}
_, err = s.sign(csr)
if err != nil {
t.Fatalf("failed to sign CSR: %v", err)
}
// now we just need to verify that the expiry is based on the signing cert
certData := csr.Status.Certificate
if len(certData) == 0 {
t.Fatalf("expected a certificate after signing")
}
certs, err := cert.ParseCertsPEM(certData)
if err != nil {
t.Fatalf("failed to parse certificate: %v", err)
}
if len(certs) != 1 {
t.Fatalf("expected one certificate")
}
crt := certs[0]
expected, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "2044-05-09 00:20:11 +0000 UTC")
if err != nil {
t.Fatal(err)
}
// there is some jitter that we need to tolerate
diff := expected.Sub(crt.NotAfter)
if diff > 10*time.Minute || diff < -10*time.Minute {
t.Fatal(crt.NotAfter)
}
}


@ -0,0 +1,129 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package signer implements a CA signer that uses keys stored on local disk.
package signer
import (
"crypto"
"encoding/pem"
"fmt"
"io/ioutil"
"time"
capi "k8s.io/api/certificates/v1beta1"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
capihelper "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
"k8s.io/kubernetes/pkg/controller/certificates"
"k8s.io/kubernetes/pkg/controller/certificates/authority"
)
func NewCSRSigningController(
client clientset.Interface,
csrInformer certificatesinformers.CertificateSigningRequestInformer,
caFile, caKeyFile string,
certTTL time.Duration,
) (*certificates.CertificateController, error) {
signer, err := newSigner(caFile, caKeyFile, client, certTTL)
if err != nil {
return nil, err
}
return certificates.NewCertificateController(
"csrsigning",
client,
csrInformer,
signer.handle,
), nil
}
type signer struct {
ca *authority.CertificateAuthority
client clientset.Interface
certTTL time.Duration
}
func newSigner(caFile, caKeyFile string, client clientset.Interface, certificateDuration time.Duration) (*signer, error) {
certPEM, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("error reading CA cert file %q: %v", caFile, err)
}
certs, err := cert.ParseCertsPEM(certPEM)
if err != nil {
return nil, fmt.Errorf("error reading CA cert file %q: %v", caFile, err)
}
if len(certs) != 1 {
return nil, fmt.Errorf("error reading CA cert file %q: expected 1 certificate, found %d", caFile, len(certs))
}
keyPEM, err := ioutil.ReadFile(caKeyFile)
if err != nil {
return nil, fmt.Errorf("error reading CA key file %q: %v", caKeyFile, err)
}
key, err := keyutil.ParsePrivateKeyPEM(keyPEM)
if err != nil {
return nil, fmt.Errorf("error reading CA key file %q: %v", caKeyFile, err)
}
priv, ok := key.(crypto.Signer)
if !ok {
return nil, fmt.Errorf("error reading CA key file %q: key did not implement crypto.Signer", caKeyFile)
}
return &signer{
ca: &authority.CertificateAuthority{
Certificate: certs[0],
PrivateKey: priv,
Backdate: 5 * time.Minute,
},
client: client,
certTTL: certificateDuration,
}, nil
}
func (s *signer) handle(csr *capi.CertificateSigningRequest) error {
if !certificates.IsCertificateRequestApproved(csr) {
return nil
}
csr, err := s.sign(csr)
if err != nil {
return fmt.Errorf("error auto signing csr: %v", err)
}
_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr)
if err != nil {
return fmt.Errorf("error updating signature for csr: %v", err)
}
return nil
}
func (s *signer) sign(csr *capi.CertificateSigningRequest) (*capi.CertificateSigningRequest, error) {
x509cr, err := capihelper.ParseCSR(csr)
if err != nil {
return nil, fmt.Errorf("unable to parse csr %q: %v", csr.Name, err)
}
der, err := s.ca.Sign(x509cr.Raw, authority.PermissiveSigningPolicy{
TTL: s.certTTL,
Usages: csr.Spec.Usages,
})
if err != nil {
return nil, err
}
csr.Status.Certificate = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
return csr, nil
}
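For reference, a hedged sketch of how the controller constructed above might be wired up with a clientset and shared informer factory; the import path for this package, the kubeconfig lookup, and the CA file locations are assumptions for illustration, not part of this change:

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"k8s.io/kubernetes/pkg/controller/certificates/signer"
)

func main() {
	// Assumes a reachable cluster via the default kubeconfig (illustrative only).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	csrInformer := factory.Certificates().V1beta1().CertificateSigningRequests()

	// CA paths and TTL are placeholders.
	controller, err := signer.NewCSRSigningController(
		client, csrInformer, "/etc/kubernetes/pki/ca.crt", "/etc/kubernetes/pki/ca.key", 365*24*time.Hour)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	factory.Start(stopCh)
	controller.Run(1, stopCh)
}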

View File

@ -0,0 +1,96 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package signer
import (
"crypto/x509"
"crypto/x509/pkix"
"io/ioutil"
"testing"
"time"
"github.com/google/go-cmp/cmp"
capi "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/client-go/util/cert"
)
func TestSigner(t *testing.T) {
clock := clock.FakeClock{}
s, err := newSigner("./testdata/ca.crt", "./testdata/ca.key", nil, 1*time.Hour)
if err != nil {
t.Fatalf("failed to create signer: %v", err)
}
s.ca.Now = clock.Now
s.ca.Backdate = 0
csrb, err := ioutil.ReadFile("./testdata/kubelet.csr")
if err != nil {
t.Fatalf("failed to read CSR: %v", err)
}
csr := &capi.CertificateSigningRequest{
Spec: capi.CertificateSigningRequestSpec{
Request: []byte(csrb),
Usages: []capi.KeyUsage{
capi.UsageSigning,
capi.UsageKeyEncipherment,
capi.UsageServerAuth,
capi.UsageClientAuth,
},
},
}
csr, err = s.sign(csr)
if err != nil {
t.Fatalf("failed to sign CSR: %v", err)
}
certData := csr.Status.Certificate
if len(certData) == 0 {
t.Fatalf("expected a certificate after signing")
}
certs, err := cert.ParseCertsPEM(certData)
if err != nil {
t.Fatalf("failed to parse certificate: %v", err)
}
if len(certs) != 1 {
t.Fatalf("expected one certificate")
}
want := x509.Certificate{
Version: 3,
Subject: pkix.Name{
CommonName: "system:node:k-a-node-s36b",
Organization: []string{"system:nodes"},
},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
BasicConstraintsValid: true,
NotAfter: clock.Now().Add(1 * time.Hour),
PublicKeyAlgorithm: x509.ECDSA,
SignatureAlgorithm: x509.SHA256WithRSA,
MaxPathLen: -1,
}
if !cmp.Equal(*certs[0], want, diff.IgnoreUnset()) {
t.Errorf("unexpected diff: %v", cmp.Diff(certs[0], want, diff.IgnoreUnset()))
}
}

View File

@ -19,6 +19,7 @@ package diff
import (
"bytes"
"fmt"
+"reflect"
"strings"
"text/tabwriter"
@ -116,3 +117,41 @@ func ObjectGoPrintSideBySide(a, b interface{}) string {
w.Flush()
return buf.String()
}
// IgnoreUnset is an option that ignores fields that are unset on the right
// hand side of a comparison. This is useful in testing to assert that an
// object is a derivative.
func IgnoreUnset() cmp.Option {
return cmp.Options{
// ignore unset fields in v2
cmp.FilterPath(func(path cmp.Path) bool {
_, v2 := path.Last().Values()
switch v2.Kind() {
case reflect.Slice, reflect.Map:
if v2.IsNil() || v2.Len() == 0 {
return true
}
case reflect.String:
if v2.Len() == 0 {
return true
}
case reflect.Interface, reflect.Ptr:
if v2.IsNil() {
return true
}
}
return false
}, cmp.Ignore()),
// ignore map entries that aren't set in v2
cmp.FilterPath(func(path cmp.Path) bool {
switch i := path.Last().(type) {
case cmp.MapIndex:
if _, v2 := i.Values(); !v2.IsValid() {
fmt.Println("E")
return true
}
}
return false
}, cmp.Ignore()),
}
}
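To make the comparison semantics concrete, a small self-contained example of IgnoreUnset: only fields that are set on the second argument take part in the comparison, so a sparse expected value can be checked against a fully populated object (the widget type is made up for illustration):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"

	"k8s.io/apimachinery/pkg/util/diff"
)

type widget struct {
	Name   string
	Labels map[string]string
	Count  int
}

func main() {
	got := widget{Name: "a", Labels: map[string]string{"k": "v"}, Count: 3}
	want := widget{Count: 3} // Name and Labels are unset and therefore ignored.
	fmt.Println(cmp.Equal(got, want, diff.IgnoreUnset())) // true
}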

View File

@ -3261,10 +3261,15 @@ func describeCertificateSigningRequest(csr *certificatesv1beta1.CertificateSigni
printListHelper(w, "\t", "StreetAddress", cr.Subject.StreetAddress)
printListHelper(w, "\t", "PostalCode", cr.Subject.PostalCode)
-if len(cr.DNSNames)+len(cr.EmailAddresses)+len(cr.IPAddresses) > 0 {
+if len(cr.DNSNames)+len(cr.EmailAddresses)+len(cr.IPAddresses)+len(cr.URIs) > 0 {
w.Write(LEVEL_0, "Subject Alternative Names:\n")
printListHelper(w, "\t", "DNS Names", cr.DNSNames)
printListHelper(w, "\t", "Email Addresses", cr.EmailAddresses)
+var uris []string
+for _, uri := range cr.URIs {
+uris = append(uris, uri.String())
+}
+printListHelper(w, "\t", "URIs", uris)
var ipaddrs []string
for _, ipaddr := range cr.IPAddresses {
ipaddrs = append(ipaddrs, ipaddr.String())

13
vendor/BUILD vendored
View File

@ -75,17 +75,6 @@ filegroup(
"//vendor/github.com/chai2010/gettext-go/gettext:all-srcs",
"//vendor/github.com/checkpoint-restore/go-criu/rpc:all-srcs",
"//vendor/github.com/client9/misspell:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/auth:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/certdb:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/config:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/crypto/pkcs7:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/csr:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/errors:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/helpers:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/info:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/log:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/ocsp/config:all-srcs",
-"//vendor/github.com/cloudflare/cfssl/signer:all-srcs",
"//vendor/github.com/clusterhq/flocker-go:all-srcs",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:all-srcs",
"//vendor/github.com/containerd/console:all-srcs",
@ -212,7 +201,6 @@ filegroup(
"//vendor/github.com/google/cadvisor/version:all-srcs",
"//vendor/github.com/google/cadvisor/watcher:all-srcs",
"//vendor/github.com/google/cadvisor/zfs:all-srcs",
-"//vendor/github.com/google/certificate-transparency-go:all-srcs",
"//vendor/github.com/google/go-cmp/cmp:all-srcs",
"//vendor/github.com/google/gofuzz:all-srcs",
"//vendor/github.com/google/uuid:all-srcs",
@ -361,7 +349,6 @@ filegroup(
"//vendor/golang.org/x/crypto/internal/chacha20:all-srcs",
"//vendor/golang.org/x/crypto/internal/subtle:all-srcs",
"//vendor/golang.org/x/crypto/nacl/secretbox:all-srcs",
-"//vendor/golang.org/x/crypto/ocsp:all-srcs",
"//vendor/golang.org/x/crypto/pbkdf2:all-srcs",
"//vendor/golang.org/x/crypto/pkcs12:all-srcs",
"//vendor/golang.org/x/crypto/poly1305:all-srcs",

View File

@ -1,24 +0,0 @@
Copyright (c) 2014 CloudFlare Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,23 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["auth.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/auth",
importpath = "github.com/cloudflare/cfssl/auth",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,94 +0,0 @@
// Package auth implements an interface for providing CFSSL
// authentication. This is meant to authenticate a client CFSSL to a
// remote CFSSL in order to prevent unauthorised use of the signature
// capabilities. This package provides both the interface and a
// standard HMAC-based implementation.
package auth
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"strings"
)
// An AuthenticatedRequest contains a request and authentication
// token. The Provider may determine whether to validate the timestamp
// and remote address.
type AuthenticatedRequest struct {
// An Authenticator decides whether to use this field.
Timestamp int64 `json:"timestamp,omitempty"`
RemoteAddress []byte `json:"remote_address,omitempty"`
Token []byte `json:"token"`
Request []byte `json:"request"`
}
// A Provider can generate tokens from a request and verify a
// request. The handling of additional authentication data (such as
// the IP address) is handled by the concrete type, as is any
// serialisation and state-keeping.
type Provider interface {
Token(req []byte) (token []byte, err error)
Verify(aReq *AuthenticatedRequest) bool
}
// Standard implements an HMAC-SHA-256 authentication provider. It may
// be supplied additional data at creation time that will be used as
// request || additional-data with the HMAC.
type Standard struct {
key []byte
ad []byte
}
// New generates a new standard authentication provider from the key
// and additional data. The additional data will be used when
// generating a new token.
func New(key string, ad []byte) (*Standard, error) {
if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 {
switch splitKey[0] {
case "env":
key = os.Getenv(splitKey[1])
case "file":
data, err := ioutil.ReadFile(splitKey[1])
if err != nil {
return nil, err
}
key = string(data)
default:
return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0])
}
}
keyBytes, err := hex.DecodeString(key)
if err != nil {
return nil, err
}
return &Standard{keyBytes, ad}, nil
}
// Token generates a new authentication token from the request.
func (p Standard) Token(req []byte) (token []byte, err error) {
h := hmac.New(sha256.New, p.key)
h.Write(req)
h.Write(p.ad)
return h.Sum(nil), nil
}
// Verify determines whether an authenticated request is valid.
func (p Standard) Verify(ad *AuthenticatedRequest) bool {
if ad == nil {
return false
}
// Standard token generation returns no error.
token, _ := p.Token(ad.Request)
if len(ad.Token) != len(token) {
return false
}
return hmac.Equal(token, ad.Token)
}
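For readers unfamiliar with the dependency being dropped, a minimal token round-trip with the package above looked roughly like this; the hex key is illustrative and not from this repository:

package main

import (
	"fmt"

	"github.com/cloudflare/cfssl/auth"
)

func main() {
	// A 32-hex-character string decodes to a 16-byte HMAC key (illustrative value).
	provider, err := auth.New("000102030405060708090a0b0c0d0e0f", nil)
	if err != nil {
		panic(err)
	}
	req := []byte(`{"hosts":["example.invalid"]}`)
	token, err := provider.Token(req)
	if err != nil {
		panic(err)
	}
	ok := provider.Verify(&auth.AuthenticatedRequest{Token: token, Request: req})
	fmt.Println(ok) // true
}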

View File

@ -1,23 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["certdb.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/certdb",
importpath = "github.com/cloudflare/cfssl/certdb",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,75 +0,0 @@
# certdb usage
Using a database enables additional functionality for existing commands when a
db config is provided:
- `sign` and `gencert` add a certificate to the certdb after signing it
- `serve` enables database functionality for the sign and revoke endpoints
A database is required for the following:
- `revoke` marks certificates revoked in the database with an optional reason
- `ocsprefresh` refreshes the table of cached OCSP responses
- `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format
## Setup/Migration
This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends.
Currently supported:
- MySQL in mysql
- PostgreSQL in pg
- SQLite in sqlite
### Get goose
go get bitbucket.org/liamstask/goose/cmd/goose
### Use goose to start and terminate a MySQL DB
To start a MySQL using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up
To tear down a MySQL DB using goose
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down
Note: the administration of MySQL DB is not included. We assume
the databases being connected to are already created and access control
is properly handled.
### Use goose to start and terminate a PostgreSQL DB
To start a PostgreSQL using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up
To tear down a PostgreSQL DB using goose
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down
Note: the administration of PostgreSQL DB is not included. We assume
the databases being connected to are already created and access control
is properly handled.
### Use goose to start and terminate a SQLite DB
To start a SQLite DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up
To tear down a SQLite DB using goose
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down
## CFSSL Configuration
Several cfssl commands take a -db-config flag. Create a file with a
JSON dictionary:
{"driver":"sqlite3","data_source":"certs.db"}
or
{"driver":"postgres","data_source":"postgres://user:password@host/db"}
or
{"driver":"mysql","data_source":"user:password@tcp(hostname:3306)/db?parseTime=true"}

View File

@ -1,42 +0,0 @@
package certdb
import (
"time"
)
// CertificateRecord encodes a certificate and its metadata
// that will be recorded in a database.
type CertificateRecord struct {
Serial string `db:"serial_number"`
AKI string `db:"authority_key_identifier"`
CALabel string `db:"ca_label"`
Status string `db:"status"`
Reason int `db:"reason"`
Expiry time.Time `db:"expiry"`
RevokedAt time.Time `db:"revoked_at"`
PEM string `db:"pem"`
}
// OCSPRecord encodes an OCSP response body and its metadata
// that will be recorded in a database.
type OCSPRecord struct {
Serial string `db:"serial_number"`
AKI string `db:"authority_key_identifier"`
Body string `db:"body"`
Expiry time.Time `db:"expiry"`
}
// Accessor abstracts the CRUD of certdb objects from a DB.
type Accessor interface {
InsertCertificate(cr CertificateRecord) error
GetCertificate(serial, aki string) ([]CertificateRecord, error)
GetUnexpiredCertificates() ([]CertificateRecord, error)
GetRevokedAndUnexpiredCertificates() ([]CertificateRecord, error)
GetRevokedAndUnexpiredCertificatesByLabel(label string) ([]CertificateRecord, error)
RevokeCertificate(serial, aki string, reasonCode int) error
InsertOCSP(rr OCSPRecord) error
GetOCSP(serial, aki string) ([]OCSPRecord, error)
GetUnexpiredOCSPs() ([]OCSPRecord, error)
UpdateOCSP(serial, aki, body string, expiry time.Time) error
UpsertOCSP(serial, aki, body string, expiry time.Time) error
}
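Callers were expected to program against the Accessor interface rather than a concrete database driver. A hedged sketch of such a caller (the package name and helper are hypothetical):

package certstore

import (
	"time"

	"github.com/cloudflare/cfssl/certdb"
)

// recordCertificate is a hypothetical helper that persists an issued
// certificate through any Accessor implementation.
func recordCertificate(db certdb.Accessor, serial, aki, pemCert string, expiry time.Time) error {
	return db.InsertCertificate(certdb.CertificateRecord{
		Serial: serial,
		AKI:    aki,
		Status: "good",
		Expiry: expiry,
		PEM:    pemCert,
	})
}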

View File

@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["config.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/config",
importpath = "github.com/cloudflare/cfssl/config",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/cloudflare/cfssl/auth:go_default_library",
"//vendor/github.com/cloudflare/cfssl/errors:go_default_library",
"//vendor/github.com/cloudflare/cfssl/helpers:go_default_library",
"//vendor/github.com/cloudflare/cfssl/log:go_default_library",
"//vendor/github.com/cloudflare/cfssl/ocsp/config:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,659 +0,0 @@
// Package config contains the configuration logic for CFSSL.
package config
import (
"crypto/tls"
"crypto/x509"
"encoding/asn1"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"regexp"
"strconv"
"strings"
"time"
"github.com/cloudflare/cfssl/auth"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
)
// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
// not present in a SigningProfile, all of these fields may be copied from the
// CSR into the signed certificate. If a CSRWhitelist *is* present in a
// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
// be copied from the CSR to the signed certificate. Note that some of these
// fields, like Subject, can be provided or partially provided through the API.
// Since API clients are expected to be trusted, but CSRs are not, fields
// provided through the API are not subject to whitelisting through this
// mechanism.
type CSRWhitelist struct {
Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
DNSNames, IPAddresses, EmailAddresses bool
}
// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
// JSON marshal / unmarshal.
type OID asn1.ObjectIdentifier
// CertificatePolicy represents the ASN.1 PolicyInformation structure from
// https://tools.ietf.org/html/rfc3280.html#page-106.
// Valid values of Type are "id-qt-unotice" and "id-qt-cps"
type CertificatePolicy struct {
ID OID
Qualifiers []CertificatePolicyQualifier
}
// CertificatePolicyQualifier represents a single qualifier from an ASN.1
// PolicyInformation structure.
type CertificatePolicyQualifier struct {
Type string
Value string
}
// AuthRemote is an authenticated remote signer.
type AuthRemote struct {
RemoteName string `json:"remote"`
AuthKeyName string `json:"auth_key"`
}
// CAConstraint specifies various CA constraints on the signed certificate.
// CAConstraint would verify against (and override) the CA
// extensions in the given CSR.
type CAConstraint struct {
IsCA bool `json:"is_ca"`
MaxPathLen int `json:"max_path_len"`
MaxPathLenZero bool `json:"max_path_len_zero"`
}
// A SigningProfile stores information that the CA needs to store
// signature policy.
type SigningProfile struct {
Usage []string `json:"usages"`
IssuerURL []string `json:"issuer_urls"`
OCSP string `json:"ocsp_url"`
CRL string `json:"crl_url"`
CAConstraint CAConstraint `json:"ca_constraint"`
OCSPNoCheck bool `json:"ocsp_no_check"`
ExpiryString string `json:"expiry"`
BackdateString string `json:"backdate"`
AuthKeyName string `json:"auth_key"`
RemoteName string `json:"remote"`
NotBefore time.Time `json:"not_before"`
NotAfter time.Time `json:"not_after"`
NameWhitelistString string `json:"name_whitelist"`
AuthRemote AuthRemote `json:"auth_remote"`
CTLogServers []string `json:"ct_log_servers"`
AllowedExtensions []OID `json:"allowed_extensions"`
CertStore string `json:"cert_store"`
Policies []CertificatePolicy
Expiry time.Duration
Backdate time.Duration
Provider auth.Provider
RemoteProvider auth.Provider
RemoteServer string
RemoteCAs *x509.CertPool
ClientCert *tls.Certificate
CSRWhitelist *CSRWhitelist
NameWhitelist *regexp.Regexp
ExtensionWhitelist map[string]bool
ClientProvidesSerialNumbers bool
}
// UnmarshalJSON unmarshals a JSON string into an OID.
func (oid *OID) UnmarshalJSON(data []byte) (err error) {
if data[0] != '"' || data[len(data)-1] != '"' {
return errors.New("OID JSON string not wrapped in quotes." + string(data))
}
data = data[1 : len(data)-1]
parsedOid, err := parseObjectIdentifier(string(data))
if err != nil {
return err
}
*oid = OID(parsedOid)
return
}
// MarshalJSON marshals an oid into a JSON string.
func (oid OID) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil
}
func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
validOID, err := regexp.MatchString("\\d(\\.\\d+)*", oidString)
if err != nil {
return
}
if !validOID {
err = errors.New("Invalid OID")
return
}
segments := strings.Split(oidString, ".")
oid = make(asn1.ObjectIdentifier, len(segments))
for i, intString := range segments {
oid[i], err = strconv.Atoi(intString)
if err != nil {
return
}
}
return
}
const timeFormat = "2006-01-02T15:04:05"
// populate is used to fill in the fields that are not in JSON
//
// First, the ExpiryString parameter is needed to parse
// expiration timestamps from JSON. The JSON decoder is not able to
// decode a string time duration to a time.Duration, so this is called
// when loading the configuration to properly parse and fill out the
// Expiry parameter.
// This function is also used to create references to the auth key
// and default remote for the profile.
// It returns nil if ExpiryString is a valid representation of a
// time.Duration and the AuthKeyName and RemoteName point to
// valid objects; otherwise it returns an error.
func (p *SigningProfile) populate(cfg *Config) error {
if p == nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
}
var err error
if p.RemoteName == "" && p.AuthRemote.RemoteName == "" {
log.Debugf("parse expiry in profile")
if p.ExpiryString == "" {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
}
dur, err := time.ParseDuration(p.ExpiryString)
if err != nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
}
log.Debugf("expiry is valid")
p.Expiry = dur
if p.BackdateString != "" {
dur, err = time.ParseDuration(p.BackdateString)
if err != nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
}
p.Backdate = dur
}
if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
}
if len(p.Policies) > 0 {
for _, policy := range p.Policies {
for _, qualifier := range policy.Qualifiers {
if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("invalid policy qualifier type"))
}
}
}
}
} else if p.RemoteName != "" {
log.Debug("match remote in profile to remotes section")
if p.AuthRemote.RemoteName != "" {
log.Error("profile has both a remote and an auth remote specified")
return cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
}
if remote := cfg.Remotes[p.RemoteName]; remote != "" {
if err := p.updateRemote(remote); err != nil {
return err
}
} else {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to find remote in remotes section"))
}
} else {
log.Debug("match auth remote in profile to remotes section")
if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" {
if err := p.updateRemote(remote); err != nil {
return err
}
} else {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to find remote in remotes section"))
}
}
if p.AuthKeyName != "" {
log.Debug("match auth key in profile to auth_keys section")
if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok == true {
if key.Type == "standard" {
p.Provider, err = auth.New(key.Key, nil)
if err != nil {
log.Debugf("failed to create new standard auth provider: %v", err)
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to create new standard auth provider"))
}
} else {
log.Debugf("unknown authentication type %v", key.Type)
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("unknown authentication type"))
}
} else {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to find auth_key in auth_keys section"))
}
}
if p.AuthRemote.AuthKeyName != "" {
log.Debug("match auth remote key in profile to auth_keys section")
if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok == true {
if key.Type == "standard" {
p.RemoteProvider, err = auth.New(key.Key, nil)
if err != nil {
log.Debugf("failed to create new standard auth provider: %v", err)
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to create new standard auth provider"))
}
} else {
log.Debugf("unknown authentication type %v", key.Type)
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("unknown authentication type"))
}
} else {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to find auth_remote's auth_key in auth_keys section"))
}
}
if p.NameWhitelistString != "" {
log.Debug("compiling whitelist regular expression")
rule, err := regexp.Compile(p.NameWhitelistString)
if err != nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to compile name whitelist section"))
}
p.NameWhitelist = rule
}
p.ExtensionWhitelist = map[string]bool{}
for _, oid := range p.AllowedExtensions {
p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true
}
return nil
}
// updateRemote takes a signing profile and initializes the remote server object
// to the hostname:port combination sent by remote.
func (p *SigningProfile) updateRemote(remote string) error {
if remote != "" {
p.RemoteServer = remote
}
return nil
}
// OverrideRemotes takes a signing configuration and updates the remote server object
// to the hostname:port combination sent by remote
func (p *Signing) OverrideRemotes(remote string) error {
if remote != "" {
var err error
for _, profile := range p.Profiles {
err = profile.updateRemote(remote)
if err != nil {
return err
}
}
err = p.Default.updateRemote(remote)
if err != nil {
return err
}
}
return nil
}
// SetClientCertKeyPairFromFile updates the properties to set client certificates for mutually
// authenticated TLS remote requests
func (p *Signing) SetClientCertKeyPairFromFile(certFile string, keyFile string) error {
if certFile != "" && keyFile != "" {
cert, err := helpers.LoadClientCertificate(certFile, keyFile)
if err != nil {
return err
}
for _, profile := range p.Profiles {
profile.ClientCert = cert
}
p.Default.ClientCert = cert
}
return nil
}
// SetRemoteCAsFromFile reads root CAs from file and updates the properties to set remote CAs for TLS
// remote requests
func (p *Signing) SetRemoteCAsFromFile(caFile string) error {
if caFile != "" {
remoteCAs, err := helpers.LoadPEMCertPool(caFile)
if err != nil {
return err
}
p.SetRemoteCAs(remoteCAs)
}
return nil
}
// SetRemoteCAs updates the properties to set remote CAs for TLS
// remote requests
func (p *Signing) SetRemoteCAs(remoteCAs *x509.CertPool) {
for _, profile := range p.Profiles {
profile.RemoteCAs = remoteCAs
}
p.Default.RemoteCAs = remoteCAs
}
// NeedsRemoteSigner returns true if one of the profiles has a remote set
func (p *Signing) NeedsRemoteSigner() bool {
for _, profile := range p.Profiles {
if profile.RemoteServer != "" {
return true
}
}
if p.Default.RemoteServer != "" {
return true
}
return false
}
// NeedsLocalSigner returns true if one of the profiles does not have a remote set
func (p *Signing) NeedsLocalSigner() bool {
for _, profile := range p.Profiles {
if profile.RemoteServer == "" {
return true
}
}
if p.Default.RemoteServer == "" {
return true
}
return false
}
// Usages parses the list of key uses in the profile, translating them
// to a list of X.509 key usages and extended key usages. The unknown
// uses are collected into a slice that is also returned.
func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
for _, keyUse := range p.Usage {
if kuse, ok := KeyUsage[keyUse]; ok {
ku |= kuse
} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
eku = append(eku, ekuse)
} else {
unk = append(unk, keyUse)
}
}
return
}
// A valid profile must be a valid local profile or a valid remote profile.
// A valid local profile has defined at least key usages to be used, and a
// valid local default profile has defined at least a default expiration.
// A valid remote profile (default or not) has remote signer initialized.
// In addition, a remote profile must have a valid auth provider if an
// auth key is defined.
func (p *SigningProfile) validProfile(isDefault bool) bool {
if p == nil {
return false
}
if p.AuthRemote.RemoteName == "" && p.AuthRemote.AuthKeyName != "" {
log.Debugf("invalid auth remote profile: no remote signer specified")
return false
}
if p.RemoteName != "" {
log.Debugf("validate remote profile")
if p.RemoteServer == "" {
log.Debugf("invalid remote profile: no remote signer specified")
return false
}
if p.AuthKeyName != "" && p.Provider == nil {
log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set")
return false
}
if p.AuthRemote.RemoteName != "" {
log.Debugf("invalid remote profile: auth remote is also specified")
return false
}
} else if p.AuthRemote.RemoteName != "" {
log.Debugf("validate auth remote profile")
if p.RemoteServer == "" {
log.Debugf("invalid auth remote profile: no remote signer specified")
return false
}
if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil {
log.Debugf("invalid auth remote profile: no auth key is defined")
return false
}
} else {
log.Debugf("validate local profile")
if !isDefault {
if len(p.Usage) == 0 {
log.Debugf("invalid local profile: no usages specified")
return false
} else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
log.Debugf("invalid local profile: no valid usages")
return false
}
} else {
if p.Expiry == 0 {
log.Debugf("invalid local profile: no expiry set")
return false
}
}
}
log.Debugf("profile is valid")
return true
}
// hasLocalConfig checks whether the SigningProfile contains configuration that is only effective with a local signer,
// which has access to the CA private key.
func (p *SigningProfile) hasLocalConfig() bool {
if p.Usage != nil ||
p.IssuerURL != nil ||
p.OCSP != "" ||
p.ExpiryString != "" ||
p.BackdateString != "" ||
p.CAConstraint.IsCA != false ||
!p.NotBefore.IsZero() ||
!p.NotAfter.IsZero() ||
p.NameWhitelistString != "" ||
len(p.CTLogServers) != 0 {
return true
}
return false
}
// warnSkippedSettings prints a log warning message about skipped settings
// in a SigningProfile, usually due to remote signer.
func (p *Signing) warnSkippedSettings() {
const warningMessage = `The configuration value by "usages", "issuer_urls", "ocsp_url", "crl_url", "ca_constraint", "expiry", "backdate", "not_before", "not_after", "cert_store" and "ct_log_servers" are skipped`
if p == nil {
return
}
if (p.Default.RemoteName != "" || p.Default.AuthRemote.RemoteName != "") && p.Default.hasLocalConfig() {
log.Warning("default profile points to a remote signer: ", warningMessage)
}
for name, profile := range p.Profiles {
if (profile.RemoteName != "" || profile.AuthRemote.RemoteName != "") && profile.hasLocalConfig() {
log.Warningf("Profiles[%s] points to a remote signer: %s", name, warningMessage)
}
}
}
// Signing codifies the signature configuration policy for a CA.
type Signing struct {
Profiles map[string]*SigningProfile `json:"profiles"`
Default *SigningProfile `json:"default"`
}
// Config stores configuration information for the CA.
type Config struct {
Signing *Signing `json:"signing"`
OCSP *ocspConfig.Config `json:"ocsp"`
AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
Remotes map[string]string `json:"remotes,omitempty"`
}
// Valid ensures that Config is a valid configuration. It should be
// called immediately after parsing a configuration file.
func (c *Config) Valid() bool {
return c.Signing.Valid()
}
// Valid checks the signature policies, ensuring they are valid
// policies. A policy is valid if it has defined at least key usages
// to be used, and a valid default profile has defined at least a
// default expiration.
func (p *Signing) Valid() bool {
if p == nil {
return false
}
log.Debugf("validating configuration")
if !p.Default.validProfile(true) {
log.Debugf("default profile is invalid")
return false
}
for _, sp := range p.Profiles {
if !sp.validProfile(false) {
log.Debugf("invalid profile")
return false
}
}
p.warnSkippedSettings()
return true
}
// KeyUsage contains a mapping of string names to key usages.
var KeyUsage = map[string]x509.KeyUsage{
"signing": x509.KeyUsageDigitalSignature,
"digital signature": x509.KeyUsageDigitalSignature,
"content commitment": x509.KeyUsageContentCommitment,
"key encipherment": x509.KeyUsageKeyEncipherment,
"key agreement": x509.KeyUsageKeyAgreement,
"data encipherment": x509.KeyUsageDataEncipherment,
"cert sign": x509.KeyUsageCertSign,
"crl sign": x509.KeyUsageCRLSign,
"encipher only": x509.KeyUsageEncipherOnly,
"decipher only": x509.KeyUsageDecipherOnly,
}
// ExtKeyUsage contains a mapping of string names to extended key
// usages.
var ExtKeyUsage = map[string]x509.ExtKeyUsage{
"any": x509.ExtKeyUsageAny,
"server auth": x509.ExtKeyUsageServerAuth,
"client auth": x509.ExtKeyUsageClientAuth,
"code signing": x509.ExtKeyUsageCodeSigning,
"email protection": x509.ExtKeyUsageEmailProtection,
"s/mime": x509.ExtKeyUsageEmailProtection,
"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
"ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
"ipsec user": x509.ExtKeyUsageIPSECUser,
"timestamping": x509.ExtKeyUsageTimeStamping,
"ocsp signing": x509.ExtKeyUsageOCSPSigning,
"microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
"netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
}
// An AuthKey contains an entry for a key used for authentication.
type AuthKey struct {
// Type contains information needed to select the appropriate
// constructor. For example, "standard" for HMAC-SHA-256,
// "standard-ip" for HMAC-SHA-256 incorporating the client's
// IP.
Type string `json:"type"`
// Key contains the key information, such as a hex-encoded
// HMAC key.
Key string `json:"key"`
}
// DefaultConfig returns a default configuration specifying basic key
// usage and a 1 year expiration time. The key usages chosen are
// signing, key encipherment, client auth and server auth.
func DefaultConfig() *SigningProfile {
d := helpers.OneYear
return &SigningProfile{
Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
Expiry: d,
ExpiryString: "8760h",
}
}
// LoadFile attempts to load the configuration file stored at the path
// and returns the configuration. On error, it returns nil.
func LoadFile(path string) (*Config, error) {
log.Debugf("loading configuration file from %s", path)
if path == "" {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path"))
}
body, err := ioutil.ReadFile(path)
if err != nil {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file"))
}
return LoadConfig(body)
}
// LoadConfig attempts to load the configuration from a byte slice.
// On error, it returns nil.
func LoadConfig(config []byte) (*Config, error) {
var cfg = &Config{}
err := json.Unmarshal(config, &cfg)
if err != nil {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
errors.New("failed to unmarshal configuration: "+err.Error()))
}
if cfg.Signing == nil {
return nil, errors.New("No \"signing\" field present")
}
if cfg.Signing.Default == nil {
log.Debugf("no default given: using default config")
cfg.Signing.Default = DefaultConfig()
} else {
if err := cfg.Signing.Default.populate(cfg); err != nil {
return nil, err
}
}
for k := range cfg.Signing.Profiles {
if err := cfg.Signing.Profiles[k].populate(cfg); err != nil {
return nil, err
}
}
if !cfg.Valid() {
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration"))
}
log.Debugf("configuration ok")
return cfg, nil
}
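A minimal example of loading a signing configuration with LoadConfig; the JSON keys mirror the struct tags defined above, and the profile contents are illustrative:

package main

import (
	"fmt"

	"github.com/cloudflare/cfssl/config"
)

func main() {
	raw := []byte(`{
  "signing": {
    "default": {"usages": ["signing", "key encipherment"], "expiry": "8760h"},
    "profiles": {
      "server": {"usages": ["server auth"], "expiry": "720h"}
    }
  }
}`)
	cfg, err := config.LoadConfig(raw)
	if err != nil {
		panic(err)
	}
	ku, eku, _ := cfg.Signing.Profiles["server"].Usages()
	fmt.Println(ku, eku)
}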

View File

@ -1,24 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pkcs7.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/crypto/pkcs7",
importpath = "github.com/cloudflare/cfssl/crypto/pkcs7",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/cloudflare/cfssl/errors:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,188 +0,0 @@
// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
// used to package certificates and CRLs. Using openssl, every certificate converted
// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
// reference: https://www.openssl.org/docs/man1.1.0/apps/crl2pkcs7.html
//
// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
//
// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements,
// for example data can be encrypted and signed and then packaged through pkcs#7 to be
// sent over a network and then verified and decrypted. It is asn1, and the type of
// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
//
// ContentInfo ::= SEQUENCE {
// contentType ContentType,
// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
// }
//
// There are 6 possible ContentTypes, data, signedData, envelopedData,
// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and encrypted
// Data are implemented, as the degenerate case of signedData without a signature is the typical
// format for transferring certificates and CRLS, and Data and encryptedData are used in PKCS #12
// formats.
// The ContentType signedData has the form:
//
//
// signedData ::= SEQUENCE {
// version Version,
// digestAlgorithms DigestAlgorithmIdentifiers,
// contentInfo ContentInfo,
// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL
// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
// signerInfos SignerInfos
// }
//
// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to
// this system's use of PKCS #7 data. Version is an integer type, note that PKCS #7 is
// recursive, this second layer of ContentInfo is similarly ignored for our degenerate
// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting
// of any number of extended certificates is not yet supported in this implementation.
//
// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
//
// The ContentType encryptedData is the most complicated and its form can be gathered by
// the go type below. It essentially contains a raw octet string of encrypted data and an
// algorithm identifier for use in decrypting this data.
package pkcs7
import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
cferr "github.com/cloudflare/cfssl/errors"
)
// Types used for asn1 Unmarshaling.
type signedData struct {
Version int
DigestAlgorithms asn1.RawValue
ContentInfo asn1.RawValue
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
Crls asn1.RawValue `asn1:"optional"`
SignerInfos asn1.RawValue
}
type initPKCS7 struct {
Raw asn1.RawContent
ContentType asn1.ObjectIdentifier
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
}
// Object identifier strings of the three implemented PKCS7 types.
const (
ObjIDData = "1.2.840.113549.1.7.1"
ObjIDSignedData = "1.2.840.113549.1.7.2"
ObjIDEncryptedData = "1.2.840.113549.1.7.6"
)
// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three
// possible types of Content objects, as denoted by the object identifier in
// the ContentInfo field, the other two being nil. SignedData
// is the degenerate SignedData Content info without signature used
// to hold certificates and crls. Data is raw bytes, and EncryptedData
// is as defined in PKCS #7 standard.
type PKCS7 struct {
Raw asn1.RawContent
ContentInfo string
Content Content
}
// Content implements three of the six possible PKCS7 data types. Only one is non-nil.
type Content struct {
Data []byte
SignedData SignedData
EncryptedData EncryptedData
}
// SignedData defines the typical carrier of certificates and crls.
type SignedData struct {
Raw asn1.RawContent
Version int
Certificates []*x509.Certificate
Crl *pkix.CertificateList
}
// Data contains raw bytes. Used as a subtype in PKCS12.
type Data struct {
Bytes []byte
}
// EncryptedData contains encrypted data. Used as a subtype in PKCS12.
type EncryptedData struct {
Raw asn1.RawContent
Version int
EncryptedContentInfo EncryptedContentInfo
}
// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
type EncryptedContentInfo struct {
Raw asn1.RawContent
ContentType asn1.ObjectIdentifier
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
EncryptedContent []byte `asn1:"tag:0,optional"`
}
// ParsePKCS7 attempts to parse the DER encoded bytes of a
// PKCS7 structure.
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
var pkcs7 initPKCS7
_, err = asn1.Unmarshal(raw, &pkcs7)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
msg = new(PKCS7)
msg.Raw = pkcs7.Raw
msg.ContentInfo = pkcs7.ContentType.String()
switch {
case msg.ContentInfo == ObjIDData:
msg.ContentInfo = "Data"
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
case msg.ContentInfo == ObjIDSignedData:
msg.ContentInfo = "SignedData"
var signedData signedData
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
if len(signedData.Certificates.Bytes) != 0 {
msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
}
if len(signedData.Crls.Bytes) != 0 {
msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
}
msg.Content.SignedData.Version = signedData.Version
msg.Content.SignedData.Raw = pkcs7.Content.Bytes
case msg.ContentInfo == ObjIDEncryptedData:
msg.ContentInfo = "EncryptedData"
var encryptedData EncryptedData
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
if encryptedData.Version != 0 {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0"))
}
msg.Content.EncryptedData = encryptedData
default:
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data"))
}
return msg, nil
}
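A short usage sketch for ParsePKCS7; the bundle.p7b path is an assumption, e.g. the output of "openssl crl2pkcs7 -nocrl -certfile chain.pem":

package main

import (
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/crypto/pkcs7"
)

func main() {
	raw, err := ioutil.ReadFile("bundle.p7b") // hypothetical PEM-encoded "PKCS7" block
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	msg, err := pkcs7.ParsePKCS7(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(msg.ContentInfo, len(msg.Content.SignedData.Certificates))
}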

View File

@ -1,28 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["csr.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/csr",
importpath = "github.com/cloudflare/cfssl/csr",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/cloudflare/cfssl/errors:go_default_library",
"//vendor/github.com/cloudflare/cfssl/helpers:go_default_library",
"//vendor/github.com/cloudflare/cfssl/log:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,432 +0,0 @@
// Package csr implements certificate requests for CFSSL.
package csr
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"net"
"net/mail"
"strings"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/log"
)
const (
curveP256 = 256
curveP384 = 384
curveP521 = 521
)
// A Name contains the SubjectInfo fields.
type Name struct {
C string // Country
ST string // State
L string // Locality
O string // OrganisationName
OU string // OrganisationalUnitName
SerialNumber string
}
// A KeyRequest is a generic request for a new key.
type KeyRequest interface {
Algo() string
Size() int
Generate() (crypto.PrivateKey, error)
SigAlgo() x509.SignatureAlgorithm
}
// A BasicKeyRequest contains the algorithm and key size for a new private key.
type BasicKeyRequest struct {
A string `json:"algo" yaml:"algo"`
S int `json:"size" yaml:"size"`
}
// NewBasicKeyRequest returns a default BasicKeyRequest.
func NewBasicKeyRequest() *BasicKeyRequest {
return &BasicKeyRequest{"ecdsa", curveP256}
}
// Algo returns the requested key algorithm represented as a string.
func (kr *BasicKeyRequest) Algo() string {
return kr.A
}
// Size returns the requested key size.
func (kr *BasicKeyRequest) Size() int {
return kr.S
}
// Generate generates a key as specified in the request. Currently,
// only ECDSA and RSA are supported.
func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) {
log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size())
switch kr.Algo() {
case "rsa":
if kr.Size() < 2048 {
return nil, errors.New("RSA key is too weak")
}
if kr.Size() > 8192 {
return nil, errors.New("RSA key size too large")
}
return rsa.GenerateKey(rand.Reader, kr.Size())
case "ecdsa":
var curve elliptic.Curve
switch kr.Size() {
case curveP256:
curve = elliptic.P256()
case curveP384:
curve = elliptic.P384()
case curveP521:
curve = elliptic.P521()
default:
return nil, errors.New("invalid curve")
}
return ecdsa.GenerateKey(curve, rand.Reader)
default:
return nil, errors.New("invalid algorithm")
}
}
// SigAlgo returns an appropriate X.509 signature algorithm given the
// key request's type and size.
func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm {
switch kr.Algo() {
case "rsa":
switch {
case kr.Size() >= 4096:
return x509.SHA512WithRSA
case kr.Size() >= 3072:
return x509.SHA384WithRSA
case kr.Size() >= 2048:
return x509.SHA256WithRSA
default:
return x509.SHA1WithRSA
}
case "ecdsa":
switch kr.Size() {
case curveP521:
return x509.ECDSAWithSHA512
case curveP384:
return x509.ECDSAWithSHA384
case curveP256:
return x509.ECDSAWithSHA256
default:
return x509.ECDSAWithSHA1
}
default:
return x509.UnknownSignatureAlgorithm
}
}
// CAConfig is a section used in the requests initialising a new CA.
type CAConfig struct {
PathLength int `json:"pathlen" yaml:"pathlen"`
PathLenZero bool `json:"pathlenzero" yaml:"pathlenzero"`
Expiry string `json:"expiry" yaml:"expiry"`
Backdate string `json:"backdate" yaml:"backdate"`
}
// A CertificateRequest encapsulates the API interface to the
// certificate request functionality.
type CertificateRequest struct {
CN string
Names []Name `json:"names" yaml:"names"`
Hosts []string `json:"hosts" yaml:"hosts"`
KeyRequest KeyRequest `json:"key,omitempty" yaml:"key,omitempty"`
CA *CAConfig `json:"ca,omitempty" yaml:"ca,omitempty"`
SerialNumber string `json:"serialnumber,omitempty" yaml:"serialnumber,omitempty"`
}
// New returns a new, empty CertificateRequest with a
// BasicKeyRequest.
func New() *CertificateRequest {
return &CertificateRequest{
KeyRequest: NewBasicKeyRequest(),
}
}
// appendIf appends to a if s is not an empty string.
func appendIf(s string, a *[]string) {
if s != "" {
*a = append(*a, s)
}
}
// Name returns the PKIX name for the request.
func (cr *CertificateRequest) Name() pkix.Name {
var name pkix.Name
name.CommonName = cr.CN
for _, n := range cr.Names {
appendIf(n.C, &name.Country)
appendIf(n.ST, &name.Province)
appendIf(n.L, &name.Locality)
appendIf(n.O, &name.Organization)
appendIf(n.OU, &name.OrganizationalUnit)
}
name.SerialNumber = cr.SerialNumber
return name
}
// BasicConstraints CSR information RFC 5280, 4.2.1.9
type BasicConstraints struct {
IsCA bool `asn1:"optional"`
MaxPathLen int `asn1:"optional,default:-1"`
}
// ParseRequest takes a certificate request and generates a key and
// CSR from it. It does no validation -- caveat emptor. It will,
// however, fail if the key request is not valid (i.e., an unsupported
// curve or RSA key size). The lack of validation was specifically
// chosen to allow the end user to define a policy and validate the
// request appropriately before calling this function.
func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
log.Info("received CSR")
if req.KeyRequest == nil {
req.KeyRequest = NewBasicKeyRequest()
}
log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
priv, err := req.KeyRequest.Generate()
if err != nil {
err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
return
}
switch priv := priv.(type) {
case *rsa.PrivateKey:
key = x509.MarshalPKCS1PrivateKey(priv)
block := pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: key,
}
key = pem.EncodeToMemory(&block)
case *ecdsa.PrivateKey:
key, err = x509.MarshalECPrivateKey(priv)
if err != nil {
err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
return
}
block := pem.Block{
Type: "EC PRIVATE KEY",
Bytes: key,
}
key = pem.EncodeToMemory(&block)
default:
panic("Generate should have failed to produce a valid key.")
}
csr, err = Generate(priv.(crypto.Signer), req)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
}
return
}
// ExtractCertificateRequest extracts a CertificateRequest from
// x509.Certificate. It is intended to be used for generating a new certificate
// from an existing certificate. For a root certificate, the CA expiry
// length is calculated as the duration between cert.NotAfter and cert.NotBefore.
func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
req := New()
req.CN = cert.Subject.CommonName
req.Names = getNames(cert.Subject)
req.Hosts = getHosts(cert)
req.SerialNumber = cert.Subject.SerialNumber
if cert.IsCA {
req.CA = new(CAConfig)
// CA expiry length is calculated based on the input cert
// issue date and expiry date.
req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
req.CA.PathLength = cert.MaxPathLen
req.CA.PathLenZero = cert.MaxPathLenZero
}
return req
}
func getHosts(cert *x509.Certificate) []string {
var hosts []string
for _, ip := range cert.IPAddresses {
hosts = append(hosts, ip.String())
}
for _, dns := range cert.DNSNames {
hosts = append(hosts, dns)
}
for _, email := range cert.EmailAddresses {
hosts = append(hosts, email)
}
return hosts
}
// getNames returns an array of Names from the certificate
// It only cares about Country, Organization, OrganizationalUnit, Locality, and Province.
func getNames(sub pkix.Name) []Name {
// anonymous func for finding the max of a list of integers
max := func(v1 int, vn ...int) (max int) {
max = v1
for i := 0; i < len(vn); i++ {
if vn[i] > max {
max = vn[i]
}
}
return max
}
nc := len(sub.Country)
norg := len(sub.Organization)
nou := len(sub.OrganizationalUnit)
nl := len(sub.Locality)
np := len(sub.Province)
n := max(nc, norg, nou, nl, np)
names := make([]Name, n)
for i := range names {
if i < nc {
names[i].C = sub.Country[i]
}
if i < norg {
names[i].O = sub.Organization[i]
}
if i < nou {
names[i].OU = sub.OrganizationalUnit[i]
}
if i < nl {
names[i].L = sub.Locality[i]
}
if i < np {
names[i].ST = sub.Province[i]
}
}
return names
}
// A Generator is responsible for validating certificate requests.
type Generator struct {
Validator func(*CertificateRequest) error
}
// ProcessRequest validates and processes the incoming request. It is
// a wrapper around a validator and the ParseRequest function.
func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
log.Info("generate received request")
err = g.Validator(req)
if err != nil {
log.Warningf("invalid request: %v", err)
return nil, nil, err
}
csr, key, err = ParseRequest(req)
if err != nil {
return nil, nil, err
}
return
}
// IsNameEmpty returns true if the name has no identifying information in it.
func IsNameEmpty(n Name) bool {
empty := func(s string) bool { return strings.TrimSpace(s) == "" }
if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
return true
}
return false
}
// Regenerate uses the provided CSR as a template for signing a new
// CSR using priv.
func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
req, extra, err := helpers.ParseCSR(csr)
if err != nil {
return nil, err
} else if len(extra) > 0 {
return nil, errors.New("csr: trailing data in certificate request")
}
return x509.CreateCertificateRequest(rand.Reader, req, priv)
}
// Generate creates a new CSR from a CertificateRequest structure and
// an existing key. The KeyRequest field is ignored.
func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
sigAlgo := helpers.SignerAlgo(priv)
if sigAlgo == x509.UnknownSignatureAlgorithm {
return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
}
var tpl = x509.CertificateRequest{
Subject: req.Name(),
SignatureAlgorithm: sigAlgo,
}
for i := range req.Hosts {
if ip := net.ParseIP(req.Hosts[i]); ip != nil {
tpl.IPAddresses = append(tpl.IPAddresses, ip)
} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)
} else {
tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
}
}
if req.CA != nil {
err = appendCAInfoToCSR(req.CA, &tpl)
if err != nil {
err = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err)
return
}
}
csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
return
}
block := pem.Block{
Type: "CERTIFICATE REQUEST",
Bytes: csr,
}
log.Info("encoded CSR")
csr = pem.EncodeToMemory(&block)
return
}
// appendCAInfoToCSR appends CAConfig BasicConstraint extension to a CSR
func appendCAInfoToCSR(reqConf *CAConfig, csr *x509.CertificateRequest) error {
pathlen := reqConf.PathLength
if pathlen == 0 && !reqConf.PathLenZero {
pathlen = -1
}
val, err := asn1.Marshal(BasicConstraints{true, pathlen})
if err != nil {
return err
}
csr.ExtraExtensions = []pkix.Extension{
{
Id: asn1.ObjectIdentifier{2, 5, 29, 19},
Value: val,
Critical: true,
},
}
return nil
}
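For orientation, here is a minimal usage sketch of the Generate helper above, assuming the package is importable as github.com/cloudflare/cfssl/csr. The Hosts field is used by Generate above; the CN and Names fields are assumptions from the wider csr package and are not shown in this diff.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/cfssl/csr"
)

func main() {
	// Generate an ECDSA P-256 key; Generate ignores the KeyRequest field,
	// so the caller supplies the crypto.Signer directly.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	req := &csr.CertificateRequest{
		CN:    "example.internal", // hypothetical subject common name
		Hosts: []string{"example.internal", "192.0.2.10"},
		Names: []csr.Name{{C: "US", O: "Example Org"}},
	}
	pemCSR, err := csr.Generate(key, req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", pemCSR)
}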

View File

@ -1,27 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"error.go",
"http.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/errors",
importpath = "github.com/cloudflare/cfssl/errors",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,46 +0,0 @@
/*
Package errors provides error types returned in CF SSL.
1. Type Error is intended for errors produced by CF SSL packages.
It formats to a json object that consists of an error message and a 4-digit code for error reasoning.
Example: {"code":1002, "message": "Failed to decode certificate"}
The index of codes are listed below:
1XXX: CertificateError
1000: Unknown
1001: ReadFailed
1002: DecodeFailed
1003: ParseFailed
1100: SelfSigned
12XX: VerifyFailed
121X: CertificateInvalid
1210: NotAuthorizedToSign
1211: Expired
1212: CANotAuthorizedForThisName
1213: TooManyIntermediates
1214: IncompatibleUsage
1220: UnknownAuthority
2XXX: PrivatekeyError
2000: Unknown
2001: ReadFailed
2002: DecodeFailed
2003: ParseFailed
2100: Encrypted
2200: NotRSA
2300: KeyMismatch
2400: GenerationFailed
2500: Unavailable
3XXX: IntermediatesError
4XXX: RootError
5XXX: PolicyError
5100: NoKeyUsages
5200: InvalidPolicy
5300: InvalidRequest
5400: UnknownProfile
6XXX: DialError
2. Type HttpError is intended for the CF SSL API to consume. It contains an HTTP status code that will be read and returned
by the API server.
*/
package errors

View File

@ -1,438 +0,0 @@
package errors
import (
"crypto/x509"
"encoding/json"
"fmt"
)
// Error is the error type usually returned by functions in the CF SSL package.
// It contains a 4-digit error code where the most significant digit
// describes the category in which the error occurred and the remaining 3 digits
// describe the specific error reason.
type Error struct {
ErrorCode int `json:"code"`
Message string `json:"message"`
}
// Category is the most significant digit of the error code.
type Category int
// Reason is the last 3 digits of the error code.
type Reason int
const (
// Success indicates no error occurred.
Success Category = 1000 * iota // 0XXX
// CertificateError indicates a fault in a certificate.
CertificateError // 1XXX
// PrivateKeyError indicates a fault in a private key.
PrivateKeyError // 2XXX
// IntermediatesError indicates a fault in an intermediate.
IntermediatesError // 3XXX
// RootError indicates a fault in a root.
RootError // 4XXX
// PolicyError indicates an error arising from a malformed or
// non-existent policy, or a breach of policy.
PolicyError // 5XXX
// DialError indicates a network fault.
DialError // 6XXX
// APIClientError indicates a problem with the API client.
APIClientError // 7XXX
// OCSPError indicates a problem with OCSP signing
OCSPError // 8XXX
// CSRError indicates a problem with CSR parsing
CSRError // 9XXX
// CTError indicates a problem with the certificate transparency process
CTError // 10XXX
// CertStoreError indicates a problem with the certificate store
CertStoreError // 11XXX
)
// None is a non-specified error.
const (
None Reason = iota
)
// Warning codes for a success
const (
BundleExpiringBit int = 1 << iota // 0x01
BundleNotUbiquitousBit // 0x02
)
// Parsing errors
const (
Unknown Reason = iota // X000
ReadFailed // X001
DecodeFailed // X002
ParseFailed // X003
)
// The following represent certificate non-parsing errors, and must be
// specified along with CertificateError.
const (
// SelfSigned indicates that a certificate is self-signed and
// cannot be used in the manner being attempted.
SelfSigned Reason = 100 * (iota + 1) // Code 11XX
// VerifyFailed is an X.509 verification failure. The two least
// significant digits of 12XX are determined by examining the actual
// x509 error.
VerifyFailed // Code 12XX
// BadRequest indicates that the certificate request is invalid.
BadRequest // Code 13XX
// MissingSerial indicates that the profile specified
// 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial
// number.
MissingSerial // Code 14XX
)
const (
certificateInvalid = 10 * (iota + 1) //121X
unknownAuthority //122x
)
// The following represent private-key non-parsing errors, and must be
// specified with PrivateKeyError.
const (
// Encrypted indicates that the private key is a PKCS #8 encrypted
// private key. At this time, CFSSL does not support decrypting
// these keys.
Encrypted Reason = 100 * (iota + 1) //21XX
// NotRSAOrECC indicates that the key is not an RSA or ECC
// private key; these are the only two private key types supported
// at this time by CFSSL.
NotRSAOrECC //22XX
// KeyMismatch indicates that the private key does not match
// the public key or certificate being presented with the key.
KeyMismatch //23XX
// GenerationFailed indicates that a private key could not
// be generated.
GenerationFailed //24XX
// Unavailable indicates that a private key mechanism (such as
// PKCS #11) was requested but support for that mechanism is
// not available.
Unavailable
)
// The following are policy-related non-parsing errors, and must be
// specified along with PolicyError.
const (
// NoKeyUsages indicates that the profile does not permit any
// key usages for the certificate.
NoKeyUsages Reason = 100 * (iota + 1) // 51XX
// InvalidPolicy indicates that policy being requested is not
// a valid policy or does not exist.
InvalidPolicy // 52XX
// InvalidRequest indicates a certificate request violated the
// constraints of the policy being applied to the request.
InvalidRequest // 53XX
// UnknownProfile indicates that the profile does not exist.
UnknownProfile // 54XX
UnmatchedWhitelist // 55xx
)
// The following are API client related errors, and should be
// specified with APIClientError.
const (
// AuthenticationFailure occurs when the client is unable
// to obtain an authentication token for the request.
AuthenticationFailure Reason = 100 * (iota + 1)
// JSONError wraps an encoding/json error.
JSONError
// IOError wraps an io/ioutil error.
IOError
// ClientHTTPError wraps a net/http error.
ClientHTTPError
// ServerRequestFailed covers any other failures from the API
// client.
ServerRequestFailed
)
// The following are OCSP related errors, and should be
// specified with OCSPError
const (
// IssuerMismatch occurs when the certificate in the OCSP signing
// request was not issued by the CA that this responder responds for.
IssuerMismatch Reason = 100 * (iota + 1) // 81XX
// InvalidStatus occurs when the OCSP signing requests includes an
// invalid value for the certificate status.
InvalidStatus
)
// Certificate transparency related errors specified with CTError
const (
// PrecertSubmissionFailed occurs when submitting a precertificate to
// a log server fails
PrecertSubmissionFailed = 100 * (iota + 1)
// CTClientConstructionFailed occurs when the construction of a new
// github.com/google/certificate-transparency client fails.
CTClientConstructionFailed
// PrecertMissingPoison occurs when a precert is passed to SignFromPrecert
// and is missing the CT poison extension.
PrecertMissingPoison
// PrecertInvalidPoison occurs when a precert is passed to SignFromPrecert
// and has an invalid CT poison extension value or the extension is not
// critical.
PrecertInvalidPoison
)
// Certificate persistence related errors specified with CertStoreError
const (
// InsertionFailed occurs when a SQL insert query fails to complete.
InsertionFailed = 100 * (iota + 1)
// RecordNotFound occurs when a SQL query targeting one unique
// record fails to update the specified row in the table.
RecordNotFound
)
// The error interface implementation, which formats to a JSON object string.
func (e *Error) Error() string {
marshaled, err := json.Marshal(e)
if err != nil {
panic(err)
}
return string(marshaled)
}
// New returns an error that contains an error code and message derived from
// the given category and reason. Currently, to avoid confusion, it is not
// allowed to create an error of category Success.
func New(category Category, reason Reason) *Error {
errorCode := int(category) + int(reason)
var msg string
switch category {
case OCSPError:
switch reason {
case ReadFailed:
msg = "No certificate provided"
case IssuerMismatch:
msg = "Certificate not issued by this issuer"
case InvalidStatus:
msg = "Invalid revocation status"
}
case CertificateError:
switch reason {
case Unknown:
msg = "Unknown certificate error"
case ReadFailed:
msg = "Failed to read certificate"
case DecodeFailed:
msg = "Failed to decode certificate"
case ParseFailed:
msg = "Failed to parse certificate"
case SelfSigned:
msg = "Certificate is self signed"
case VerifyFailed:
msg = "Unable to verify certificate"
case BadRequest:
msg = "Invalid certificate request"
case MissingSerial:
msg = "Missing serial number in request"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.",
reason))
}
case PrivateKeyError:
switch reason {
case Unknown:
msg = "Unknown private key error"
case ReadFailed:
msg = "Failed to read private key"
case DecodeFailed:
msg = "Failed to decode private key"
case ParseFailed:
msg = "Failed to parse private key"
case Encrypted:
msg = "Private key is encrypted."
case NotRSAOrECC:
msg = "Private key algorithm is not RSA or ECC"
case KeyMismatch:
msg = "Private key does not match public key"
case GenerationFailed:
msg = "Failed to new private key"
case Unavailable:
msg = "Private key is unavailable"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.",
reason))
}
case IntermediatesError:
switch reason {
case Unknown:
msg = "Unknown intermediate certificate error"
case ReadFailed:
msg = "Failed to read intermediate certificate"
case DecodeFailed:
msg = "Failed to decode intermediate certificate"
case ParseFailed:
msg = "Failed to parse intermediate certificate"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.",
reason))
}
case RootError:
switch reason {
case Unknown:
msg = "Unknown root certificate error"
case ReadFailed:
msg = "Failed to read root certificate"
case DecodeFailed:
msg = "Failed to decode root certificate"
case ParseFailed:
msg = "Failed to parse root certificate"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.",
reason))
}
case PolicyError:
switch reason {
case Unknown:
msg = "Unknown policy error"
case NoKeyUsages:
msg = "Invalid policy: no key usage available"
case InvalidPolicy:
msg = "Invalid or unknown policy"
case InvalidRequest:
msg = "Policy violation request"
case UnknownProfile:
msg = "Unknown policy profile"
case UnmatchedWhitelist:
msg = "Request does not match policy whitelist"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.",
reason))
}
case DialError:
switch reason {
case Unknown:
msg = "Failed to dial remote server"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.",
reason))
}
case APIClientError:
switch reason {
case AuthenticationFailure:
msg = "API client authentication failure"
case JSONError:
msg = "API client JSON config error"
case ClientHTTPError:
msg = "API client HTTP error"
case IOError:
msg = "API client IO error"
case ServerRequestFailed:
msg = "API client error: Server request failed"
default:
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.",
reason))
}
case CSRError:
switch reason {
case Unknown:
msg = "CSR parsing failed due to unknown error"
case ReadFailed:
msg = "CSR file read failed"
case ParseFailed:
msg = "CSR Parsing failed"
case DecodeFailed:
msg = "CSR Decode failed"
case BadRequest:
msg = "CSR Bad request"
default:
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason))
}
case CTError:
switch reason {
case Unknown:
msg = "Certificate transparency parsing failed due to unknown error"
case PrecertSubmissionFailed:
msg = "Certificate transparency precertificate submission failed"
case PrecertMissingPoison:
msg = "Precertificate is missing CT poison extension"
case PrecertInvalidPoison:
msg = "Precertificate contains an invalid CT poison extension"
default:
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason))
}
case CertStoreError:
switch reason {
case Unknown:
msg = "Certificate store action failed due to unknown error"
default:
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason))
}
default:
panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
category))
}
return &Error{ErrorCode: errorCode, Message: msg}
}
// Wrap returns an error that contains the given error and an error code derived from
// the given category, reason, and error. Currently, to avoid confusion, it is not
// allowed to create an error of category Success.
func Wrap(category Category, reason Reason, err error) *Error {
errorCode := int(category) + int(reason)
if err == nil {
panic("Wrap needs a supplied error to initialize.")
}
// do not double wrap an error
switch err.(type) {
case *Error:
panic("Unable to wrap a wrapped error.")
}
switch category {
case CertificateError:
// given VerifyFailed, report the status with a more detailed status code
// for certain certificate errors we care about.
if reason == VerifyFailed {
switch errorType := err.(type) {
case x509.CertificateInvalidError:
errorCode += certificateInvalid + int(errorType.Reason)
case x509.UnknownAuthorityError:
errorCode += unknownAuthority
}
}
case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError,
APIClientError, CSRError, CTError, CertStoreError, OCSPError:
// no-op, just use the error
default:
panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
category))
}
return &Error{ErrorCode: errorCode, Message: err.Error()}
}
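As a worked illustration of the code arithmetic above (a sketch assuming the import path github.com/cloudflare/cfssl/errors): New adds the category base to the reason, so CertificateError (1000) plus ParseFailed (3) yields code 1003, and the Error value renders as a JSON object.

package main

import (
	"fmt"

	cferr "github.com/cloudflare/cfssl/errors"
)

func main() {
	// CertificateError is the 1XXX category and ParseFailed is reason 3,
	// so the composed error code is 1003.
	err := cferr.New(cferr.CertificateError, cferr.ParseFailed)
	fmt.Println(err) // {"code":1003,"message":"Failed to parse certificate"}

	// Wrap keeps the wrapped error's message but derives the code the same way:
	// PrivateKeyError (2000) + ReadFailed (1) = 2001.
	wrapped := cferr.Wrap(cferr.PrivateKeyError, cferr.ReadFailed,
		fmt.Errorf("open ca-key.pem: no such file"))
	fmt.Println(wrapped.ErrorCode) // 2001
}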

View File

@ -1,47 +0,0 @@
package errors
import (
"errors"
"net/http"
)
// HTTPError is an augmented error with an HTTP status code.
type HTTPError struct {
StatusCode int
error
}
// Error implements the error interface.
func (e *HTTPError) Error() string {
return e.error.Error()
}
// NewMethodNotAllowed returns an appropriate error in the case that
// an HTTP client uses an invalid method (e.g. a GET in place of a POST)
// on an API endpoint.
func NewMethodNotAllowed(method string) *HTTPError {
return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)}
}
// NewBadRequest creates an HTTPError with the given error and error code 400.
func NewBadRequest(err error) *HTTPError {
return &HTTPError{http.StatusBadRequest, err}
}
// NewBadRequestString returns an HTTPError with the supplied message
// and error code 400.
func NewBadRequestString(s string) *HTTPError {
return NewBadRequest(errors.New(s))
}
// NewBadRequestMissingParameter returns a 400 HTTPError because a required
// parameter is missing from the HTTP request.
func NewBadRequestMissingParameter(s string) *HTTPError {
return NewBadRequestString(`Missing parameter "` + s + `"`)
}
// NewBadRequestUnwantedParameter returns a 400 HTTPError because an unnecessary
// parameter is present in the HTTP request.
func NewBadRequestUnwantedParameter(s string) *HTTPError {
return NewBadRequestString(`Unwanted parameter "` + s + `"`)
}
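A brief, hypothetical sketch of how these constructors are meant to be consumed by an HTTP handler; the handler, its route, and the "profile" parameter are illustrative, and only the errors package above is assumed.

package main

import (
	"net/http"

	cferr "github.com/cloudflare/cfssl/errors"
)

// signHandler is a hypothetical endpoint that only accepts POST and
// requires a "profile" form parameter.
func signHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		e := cferr.NewMethodNotAllowed(r.Method)
		http.Error(w, e.Error(), e.StatusCode)
		return
	}
	if r.FormValue("profile") == "" {
		e := cferr.NewBadRequestMissingParameter("profile")
		http.Error(w, e.Error(), e.StatusCode)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/sign", signHandler)
	http.ListenAndServe("127.0.0.1:8888", nil)
}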

View File

@ -1,37 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["helpers.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/helpers",
importpath = "github.com/cloudflare/cfssl/helpers",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/cloudflare/cfssl/crypto/pkcs7:go_default_library",
"//vendor/github.com/cloudflare/cfssl/errors:go_default_library",
"//vendor/github.com/cloudflare/cfssl/helpers/derhelpers:go_default_library",
"//vendor/github.com/cloudflare/cfssl/log:go_default_library",
"//vendor/github.com/google/certificate-transparency-go:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/tls:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509:go_default_library",
"//vendor/golang.org/x/crypto/ocsp:go_default_library",
"//vendor/golang.org/x/crypto/pkcs12:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/cloudflare/cfssl/helpers/derhelpers:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"derhelpers.go",
"ed25519.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/helpers/derhelpers",
importpath = "github.com/cloudflare/cfssl/helpers/derhelpers",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/cloudflare/cfssl/errors:go_default_library",
"//vendor/golang.org/x/crypto/ed25519:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,48 +0,0 @@
// Package derhelpers implements common functionality
// on DER encoded data
package derhelpers
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
cferr "github.com/cloudflare/cfssl/errors"
"golang.org/x/crypto/ed25519"
)
// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, ECDSA, or Ed25519 DER-encoded
// private key. The key must not be in PEM format.
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
if err != nil {
generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
if err != nil {
generalKey, err = x509.ParseECPrivateKey(keyDER)
if err != nil {
generalKey, err = ParseEd25519PrivateKey(keyDER)
if err != nil {
// We don't include the actual error into
// the final error. The reason might be
// we don't want to leak any info about
// the private key.
return nil, cferr.New(cferr.PrivateKeyError,
cferr.ParseFailed)
}
}
}
}
switch generalKey.(type) {
case *rsa.PrivateKey:
return generalKey.(*rsa.PrivateKey), nil
case *ecdsa.PrivateKey:
return generalKey.(*ecdsa.PrivateKey), nil
case ed25519.PrivateKey:
return generalKey.(ed25519.PrivateKey), nil
}
// should never reach here
return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed)
}
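A small round-trip sketch of the fallback chain above (assuming the import path github.com/cloudflare/cfssl/helpers/derhelpers): a PKCS #1 RSA key fails the PKCS #8 attempt and is picked up by x509.ParsePKCS1PrivateKey.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"fmt"

	"github.com/cloudflare/cfssl/helpers/derhelpers"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// Marshal as PKCS #1 DER; ParsePrivateKeyDER will fail the PKCS #8
	// attempt and fall through to x509.ParsePKCS1PrivateKey.
	der := x509.MarshalPKCS1PrivateKey(rsaKey)
	signer, err := derhelpers.ParsePrivateKeyDER(der)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed %T\n", signer) // *rsa.PrivateKey
}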

View File

@ -1,133 +0,0 @@
package derhelpers
import (
"crypto"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"golang.org/x/crypto/ed25519"
)
var errEd25519WrongID = errors.New("incorrect object identifier")
var errEd25519WrongKeyType = errors.New("incorrect key type")
// ed25519OID is the OID for the Ed25519 signature scheme: see
// https://datatracker.ietf.org/doc/draft-ietf-curdle-pkix-04.
var ed25519OID = asn1.ObjectIdentifier{1, 3, 101, 112}
// subjectPublicKeyInfo reflects the ASN.1 object defined in the X.509 standard.
//
// This is defined in crypto/x509 as "publicKeyInfo".
type subjectPublicKeyInfo struct {
Algorithm pkix.AlgorithmIdentifier
PublicKey asn1.BitString
}
// MarshalEd25519PublicKey creates a DER-encoded SubjectPublicKeyInfo for an
// ed25519 public key, as defined in
// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04. This is analogous to
// MarshalPKIXPublicKey in crypto/x509, which doesn't currently support Ed25519.
func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
pub, ok := pk.(ed25519.PublicKey)
if !ok {
return nil, errEd25519WrongKeyType
}
spki := subjectPublicKeyInfo{
Algorithm: pkix.AlgorithmIdentifier{
Algorithm: ed25519OID,
},
PublicKey: asn1.BitString{
BitLength: len(pub) * 8,
Bytes: pub,
},
}
return asn1.Marshal(spki)
}
// ParseEd25519PublicKey returns the Ed25519 public key encoded by the input.
func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
var spki subjectPublicKeyInfo
if rest, err := asn1.Unmarshal(der, &spki); err != nil {
return nil, err
} else if len(rest) > 0 {
return nil, errors.New("SubjectPublicKeyInfo too long")
}
if !spki.Algorithm.Algorithm.Equal(ed25519OID) {
return nil, errEd25519WrongID
}
if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
}
return ed25519.PublicKey(spki.PublicKey.Bytes), nil
}
// oneAsymmetricKey reflects the ASN.1 structure for storing private keys in
// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04, excluding the optional
// fields, which we don't use here.
//
// This is identical to pkcs8 in crypto/x509.
type oneAsymmetricKey struct {
Version int
Algorithm pkix.AlgorithmIdentifier
PrivateKey []byte
}
// curvePrivateKey is the inner type of the PrivateKey field of
// oneAsymmetricKey.
type curvePrivateKey []byte
// MarshalEd25519PrivateKey returns a DER encoding of the input private key as
// specified in https://tools.ietf.org/html/draft-ietf-curdle-pkix-04.
func MarshalEd25519PrivateKey(sk crypto.PrivateKey) ([]byte, error) {
priv, ok := sk.(ed25519.PrivateKey)
if !ok {
return nil, errEd25519WrongKeyType
}
// Marshal the inner CurvePrivateKey.
curvePrivateKey, err := asn1.Marshal(priv.Seed())
if err != nil {
return nil, err
}
// Marshal the OneAsymmetricKey.
asym := oneAsymmetricKey{
Version: 0,
Algorithm: pkix.AlgorithmIdentifier{
Algorithm: ed25519OID,
},
PrivateKey: curvePrivateKey,
}
return asn1.Marshal(asym)
}
// ParseEd25519PrivateKey returns the Ed25519 private key encoded by the input.
func ParseEd25519PrivateKey(der []byte) (crypto.PrivateKey, error) {
asym := new(oneAsymmetricKey)
if rest, err := asn1.Unmarshal(der, asym); err != nil {
return nil, err
} else if len(rest) > 0 {
return nil, errors.New("OneAsymmetricKey too long")
}
// Check that the key type is correct.
if !asym.Algorithm.Algorithm.Equal(ed25519OID) {
return nil, errEd25519WrongID
}
// Unmarshal the inner CurvePrivateKey.
seed := new(curvePrivateKey)
if rest, err := asn1.Unmarshal(asym.PrivateKey, seed); err != nil {
return nil, err
} else if len(rest) > 0 {
return nil, errors.New("CurvePrivateKey too long")
}
return ed25519.NewKeyFromSeed(*seed), nil
}
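A round-trip sketch of the Ed25519 helpers above (same import path assumption as before): marshal a freshly generated key to DER and parse it back; the parsed key is reconstructed from the seed, so its public half matches the original.

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/cfssl/helpers/derhelpers"
	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := derhelpers.MarshalEd25519PrivateKey(priv)
	if err != nil {
		panic(err)
	}
	parsed, err := derhelpers.ParseEd25519PrivateKey(der)
	if err != nil {
		panic(err)
	}
	// The parsed key is rebuilt from the seed, so its public half matches.
	if bytes.Equal(parsed.(ed25519.PrivateKey).Public().(ed25519.PublicKey), pub) {
		fmt.Println("round trip ok")
	}
}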

View File

@ -1,590 +0,0 @@
// Package helpers implements utility functionality common to many
// CFSSL packages.
package helpers
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/google/certificate-transparency-go"
cttls "github.com/google/certificate-transparency-go/tls"
ctx509 "github.com/google/certificate-transparency-go/x509"
"golang.org/x/crypto/ocsp"
"strings"
"time"
"github.com/cloudflare/cfssl/crypto/pkcs7"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers/derhelpers"
"github.com/cloudflare/cfssl/log"
"golang.org/x/crypto/pkcs12"
)
// OneYear is a time.Duration representing a year's worth of seconds.
const OneYear = 8760 * time.Hour
// OneDay is a time.Duration representing a day's worth of seconds.
const OneDay = 24 * time.Hour
// InclusiveDate returns the time.Time representation of a date - 1
// nanosecond. This allows time.After to be used inclusively.
func InclusiveDate(year int, month time.Month, day int) time.Time {
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
}
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
// issuing certificates valid for more than 5 years.
var Jul2012 = InclusiveDate(2012, time.July, 01)
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
// issuing certificates valid for more than 39 months.
var Apr2015 = InclusiveDate(2015, time.April, 01)
// KeyLength returns the bit size of ECDSA or RSA PublicKey
func KeyLength(key interface{}) int {
if key == nil {
return 0
}
if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
return ecdsaKey.Curve.Params().BitSize
} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
return rsaKey.N.BitLen()
}
return 0
}
// ExpiryTime returns the time when the certificate chain is expired.
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
if len(chain) == 0 {
return
}
notAfter = chain[0].NotAfter
for _, cert := range chain {
if notAfter.After(cert.NotAfter) {
notAfter = cert.NotAfter
}
}
return
}
// MonthsValid returns the number of months for which a certificate is valid.
func MonthsValid(c *x509.Certificate) int {
issued := c.NotBefore
expiry := c.NotAfter
years := (expiry.Year() - issued.Year())
months := years*12 + int(expiry.Month()) - int(issued.Month())
// Round up if valid for less than a full month
if expiry.Day() > issued.Day() {
months++
}
return months
}
// ValidExpiry determines if a certificate is valid for an acceptable
// length of time per the CA/Browser Forum baseline requirements.
// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
func ValidExpiry(c *x509.Certificate) bool {
issued := c.NotBefore
var maxMonths int
switch {
case issued.After(Apr2015):
maxMonths = 39
case issued.After(Jul2012):
maxMonths = 60
case issued.Before(Jul2012):
maxMonths = 120
}
if MonthsValid(c) > maxMonths {
return false
}
return true
}
// SignatureString returns the TLS signature string corresponding to
// an X509 signature algorithm.
func SignatureString(alg x509.SignatureAlgorithm) string {
switch alg {
case x509.MD2WithRSA:
return "MD2WithRSA"
case x509.MD5WithRSA:
return "MD5WithRSA"
case x509.SHA1WithRSA:
return "SHA1WithRSA"
case x509.SHA256WithRSA:
return "SHA256WithRSA"
case x509.SHA384WithRSA:
return "SHA384WithRSA"
case x509.SHA512WithRSA:
return "SHA512WithRSA"
case x509.DSAWithSHA1:
return "DSAWithSHA1"
case x509.DSAWithSHA256:
return "DSAWithSHA256"
case x509.ECDSAWithSHA1:
return "ECDSAWithSHA1"
case x509.ECDSAWithSHA256:
return "ECDSAWithSHA256"
case x509.ECDSAWithSHA384:
return "ECDSAWithSHA384"
case x509.ECDSAWithSHA512:
return "ECDSAWithSHA512"
default:
return "Unknown Signature"
}
}
// HashAlgoString returns the hash algorithm name contained in the signature
// method.
func HashAlgoString(alg x509.SignatureAlgorithm) string {
switch alg {
case x509.MD2WithRSA:
return "MD2"
case x509.MD5WithRSA:
return "MD5"
case x509.SHA1WithRSA:
return "SHA1"
case x509.SHA256WithRSA:
return "SHA256"
case x509.SHA384WithRSA:
return "SHA384"
case x509.SHA512WithRSA:
return "SHA512"
case x509.DSAWithSHA1:
return "SHA1"
case x509.DSAWithSHA256:
return "SHA256"
case x509.ECDSAWithSHA1:
return "SHA1"
case x509.ECDSAWithSHA256:
return "SHA256"
case x509.ECDSAWithSHA384:
return "SHA384"
case x509.ECDSAWithSHA512:
return "SHA512"
default:
return "Unknown Hash Algorithm"
}
}
// StringTLSVersion returns the underlying enum value for a human-readable TLS
// version name, defaulting to the current Go default of TLS 1.0.
func StringTLSVersion(version string) uint16 {
switch version {
case "1.2":
return tls.VersionTLS12
case "1.1":
return tls.VersionTLS11
default:
return tls.VersionTLS10
}
}
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
var buffer bytes.Buffer
for _, cert := range certs {
pem.Encode(&buffer, &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
})
}
return buffer.Bytes()
}
// EncodeCertificatePEM encodes a single x509 certificate to PEM
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
return EncodeCertificatesPEM([]*x509.Certificate{cert})
}
// ParseCertificatesPEM parses a sequence of PEM-encoded certificates and returns them;
// it can handle PEM-encoded PKCS #7 structures.
func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
var certs []*x509.Certificate
var err error
certsPEM = bytes.TrimSpace(certsPEM)
for len(certsPEM) > 0 {
var cert []*x509.Certificate
cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
if err != nil {
return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
} else if cert == nil {
break
}
certs = append(certs, cert...)
}
if len(certsPEM) > 0 {
return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
}
return certs, nil
}
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
// either PKCS #7, PKCS #12, or raw x509.
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
certsDER = bytes.TrimSpace(certsDER)
pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
if err != nil {
var pkcs12data interface{}
certs = make([]*x509.Certificate, 1)
pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
if err != nil {
certs, err = x509.ParseCertificates(certsDER)
if err != nil {
return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
}
} else {
key = pkcs12data.(crypto.Signer)
}
} else {
if pkcs7data.ContentInfo != "SignedData" {
return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info"))
}
certs = pkcs7data.Content.SignedData.Certificates
}
if certs == nil {
return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
}
return certs, key, nil
}
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks that it is self-signed.
func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
cert, err := ParseCertificatePEM(certPEM)
if err != nil {
return nil, err
}
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err)
}
return cert, nil
}
// ParseCertificatePEM parses and returns a PEM-encoded certificate;
// it can handle PEM-encoded PKCS #7 structures.
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
certPEM = bytes.TrimSpace(certPEM)
cert, rest, err := ParseOneCertificateFromPEM(certPEM)
if err != nil {
// Log the actual parsing error but throw a default parse error message.
log.Debugf("Certificate parsing error: %v", err)
return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
} else if cert == nil {
return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
} else if len(rest) > 0 {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object"))
} else if len(cert) > 1 {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
}
return cert[0], nil
}
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
// either a raw x509 certificate or a PKCS #7 structure possibly containing
// multiple certificates, from the top of certsPEM, which itself may
// contain multiple PEM encoded certificate objects.
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
block, rest := pem.Decode(certsPEM)
if block == nil {
return nil, rest, nil
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
if err != nil {
return nil, rest, err
}
if pkcs7data.ContentInfo != "SignedData" {
return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
}
certs := pkcs7data.Content.SignedData.Certificates
if certs == nil {
return nil, rest, errors.New("PKCS #7 structure contains no certificates")
}
return certs, rest, nil
}
var certs = []*x509.Certificate{cert}
return certs, rest, nil
}
// LoadPEMCertPool loads a pool of PEM certificates from file.
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
if certsFile == "" {
return nil, nil
}
pemCerts, err := ioutil.ReadFile(certsFile)
if err != nil {
return nil, err
}
return PEMToCertPool(pemCerts)
}
// PEMToCertPool converts PEM certificates to a CertPool.
func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
if len(pemCerts) == 0 {
return nil, nil
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemCerts) {
return nil, errors.New("failed to load cert pool")
}
return certPool, nil
}
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
// or elliptic private key.
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
}
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
// or elliptic private key.
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
if err != nil {
return nil, err
}
return derhelpers.ParsePrivateKeyDER(keyDER)
}
// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
keyDER, _ := pem.Decode(in)
if keyDER != nil {
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
if strings.Contains(procType, "ENCRYPTED") {
if password != nil {
return x509.DecryptPEMBlock(keyDER, password)
}
return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted)
}
}
return keyDER.Bytes, nil
}
return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed)
}
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
in = bytes.TrimSpace(in)
p, rest := pem.Decode(in)
if p != nil {
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest)
}
csr, err = x509.ParseCertificateRequest(p.Bytes)
} else {
csr, err = x509.ParseCertificateRequest(in)
}
if err != nil {
return nil, rest, err
}
err = csr.CheckSignature()
if err != nil {
return nil, rest, err
}
return csr, rest, nil
}
// ParseCSRPEM parses a PEM-encoded certificate signing request.
// It does not check the signature. This is useful for dumping data from a CSR
// locally.
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
block, _ := pem.Decode([]byte(csrPEM))
if block == nil {
return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
}
csrObject, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, err
}
return csrObject, nil
}
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
switch pub := priv.Public().(type) {
case *rsa.PublicKey:
bitLength := pub.N.BitLen()
switch {
case bitLength >= 4096:
return x509.SHA512WithRSA
case bitLength >= 3072:
return x509.SHA384WithRSA
case bitLength >= 2048:
return x509.SHA256WithRSA
default:
return x509.SHA1WithRSA
}
case *ecdsa.PublicKey:
switch pub.Curve {
case elliptic.P521():
return x509.ECDSAWithSHA512
case elliptic.P384():
return x509.ECDSAWithSHA384
case elliptic.P256():
return x509.ECDSAWithSHA256
default:
return x509.ECDSAWithSHA1
}
default:
return x509.UnknownSignatureAlgorithm
}
}
// LoadClientCertificate loads a key/certificate pair from PEM files.
func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
if certFile != "" && keyFile != "" {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
log.Critical("Unable to read client certificate from file: %s or key from file: %s", certFile, keyFile)
return nil, err
}
log.Debug("Client certificate loaded ")
return &cert, nil
}
return nil, nil
}
// CreateTLSConfig creates a tls.Config object from certs and roots
func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
var certs []tls.Certificate
if cert != nil {
certs = []tls.Certificate{*cert}
}
return &tls.Config{
Certificates: certs,
RootCAs: remoteCAs,
}
}
// SerializeSCTList serializes a list of SCTs.
func SerializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) {
list := ctx509.SignedCertificateTimestampList{}
for _, sct := range sctList {
sctBytes, err := cttls.Marshal(sct)
if err != nil {
return nil, err
}
list.SCTList = append(list.SCTList, ctx509.SerializedSCT{Val: sctBytes})
}
return cttls.Marshal(list)
}
// DeserializeSCTList deserializes a list of SCTs.
func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimestamp, error) {
var sctList ctx509.SignedCertificateTimestampList
rest, err := cttls.Unmarshal(serializedSCTList, &sctList)
if err != nil {
return nil, err
}
if len(rest) != 0 {
return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, errors.New("serialized SCT list contained trailing garbage"))
}
list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList))
for i, serializedSCT := range sctList.SCTList {
var sct ct.SignedCertificateTimestamp
rest, err := cttls.Unmarshal(serializedSCT.Val, &sct)
if err != nil {
return nil, err
}
if len(rest) != 0 {
return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, errors.New("serialized SCT contained trailing garbage"))
}
list[i] = sct
}
return list, nil
}
// SCTListFromOCSPResponse extracts the SCTList from an ocsp.Response,
// returning an empty list if the SCT extension was not found or could not be
// unmarshalled.
func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) {
// This loop finds the SCTListExtension in the OCSP response.
var SCTListExtension, ext pkix.Extension
for _, ext = range response.Extensions {
// sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp.
sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5}
if ext.Id.Equal(sctExtOid) {
SCTListExtension = ext
break
}
}
// This code block extracts the sctList from the SCT extension.
var sctList []ct.SignedCertificateTimestamp
var err error
if numBytes := len(SCTListExtension.Value); numBytes != 0 {
var serializedSCTList []byte
rest := make([]byte, numBytes)
copy(rest, SCTListExtension.Value)
for len(rest) != 0 {
rest, err = asn1.Unmarshal(rest, &serializedSCTList)
if err != nil {
return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
}
}
sctList, err = DeserializeSCTList(serializedSCTList)
}
return sctList, err
}
// ReadBytes reads a []byte either from a file or an environment variable.
// If valFile has a prefix of 'env:', the []byte is read from the environment
// using the subsequent name. If the prefix is 'file:' the []byte is read from
// the subsequent file. If no prefix is provided, valFile is assumed to be a
// file path.
func ReadBytes(valFile string) ([]byte, error) {
switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) {
case 1:
return ioutil.ReadFile(valFile)
case 2:
switch splitVal[0] {
case "env":
return []byte(os.Getenv(splitVal[1])), nil
case "file":
return ioutil.ReadFile(splitVal[1])
default:
return nil, fmt.Errorf("unknown prefix: %s", splitVal[0])
}
default:
return nil, fmt.Errorf("multiple prefixes: %s",
strings.Join(splitVal[:len(splitVal)-1], ", "))
}
}
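A short usage sketch for ReadBytes above, showing the env: prefix against an environment variable and a file: prefix against a path; the variable name and file path are hypothetical.

package main

import (
	"fmt"
	"os"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	// "env:" reads the bytes from the named environment variable ...
	os.Setenv("CA_PEM", "-----BEGIN CERTIFICATE-----\n...")
	fromEnv, err := helpers.ReadBytes("env:CA_PEM")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(fromEnv))

	// ... while a bare path or the "file:" prefix reads from disk.
	if fromFile, err := helpers.ReadBytes("file:/etc/ssl/ca.pem"); err == nil {
		fmt.Println(len(fromFile))
	}
}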

View File

@ -1,23 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["info.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/info",
importpath = "github.com/cloudflare/cfssl/info",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,15 +0,0 @@
// Package info contains the definitions for the info endpoint
package info
// Req is the request struct for an info API request.
type Req struct {
Label string `json:"label"`
Profile string `json:"profile"`
}
// Resp is the response for an Info API request.
type Resp struct {
Certificate string `json:"certificate"`
Usage []string `json:"usages"`
ExpiryString string `json:"expiry"`
}
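These structs map directly onto the JSON bodies of the info endpoint; a minimal sketch of marshaling a request (the label and profile values are hypothetical).

package main

import (
	"encoding/json"
	"fmt"

	"github.com/cloudflare/cfssl/info"
)

func main() {
	body, err := json.Marshal(info.Req{Label: "primary", Profile: "www"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // {"label":"primary","profile":"www"}
}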

View File

@ -1,23 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["log.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/log",
importpath = "github.com/cloudflare/cfssl/log",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,162 +0,0 @@
// Package log implements a wrapper around the Go standard library's
// logging package. Clients should set the current log level; only
// messages below that level will actually be logged. For example, if
// Level is set to LevelWarning, only log messages at the Warning,
// Error, and Critical levels will be logged.
package log
import (
"fmt"
"log"
"os"
)
// The following constants represent logging levels in increasing levels of seriousness.
const (
// LevelDebug is the log level for Debug statements.
LevelDebug = iota
// LevelInfo is the log level for Info statements.
LevelInfo
// LevelWarning is the log level for Warning statements.
LevelWarning
// LevelError is the log level for Error statements.
LevelError
// LevelCritical is the log level for Critical statements.
LevelCritical
// LevelFatal is the log level for Fatal statements.
LevelFatal
)
var levelPrefix = [...]string{
LevelDebug: "DEBUG",
LevelInfo: "INFO",
LevelWarning: "WARNING",
LevelError: "ERROR",
LevelCritical: "CRITICAL",
LevelFatal: "FATAL",
}
// Level stores the current logging level.
var Level = LevelInfo
// SyslogWriter specifies the necessary methods for an alternate output
// destination passed in via SetLogger.
//
// SyslogWriter is satisfied by *syslog.Writer.
type SyslogWriter interface {
Debug(string)
Info(string)
Warning(string)
Err(string)
Crit(string)
Emerg(string)
}
// syslogWriter stores the SetLogger() parameter.
var syslogWriter SyslogWriter
// SetLogger sets the output used for output by this package.
// A *syslog.Writer is a good choice for the logger parameter.
// Call with a nil parameter to revert to default behavior.
func SetLogger(logger SyslogWriter) {
syslogWriter = logger
}
func print(l int, msg string) {
if l >= Level {
if syslogWriter != nil {
switch l {
case LevelDebug:
syslogWriter.Debug(msg)
case LevelInfo:
syslogWriter.Info(msg)
case LevelWarning:
syslogWriter.Warning(msg)
case LevelError:
syslogWriter.Err(msg)
case LevelCritical:
syslogWriter.Crit(msg)
case LevelFatal:
syslogWriter.Emerg(msg)
}
} else {
log.Printf("[%s] %s", levelPrefix[l], msg)
}
}
}
func outputf(l int, format string, v []interface{}) {
print(l, fmt.Sprintf(format, v...))
}
func output(l int, v []interface{}) {
print(l, fmt.Sprint(v...))
}
// Fatalf logs a formatted message at the "fatal" level and then exits. The
// arguments are handled in the same manner as fmt.Printf.
func Fatalf(format string, v ...interface{}) {
outputf(LevelFatal, format, v)
os.Exit(1)
}
// Fatal logs its arguments at the "fatal" level and then exits.
func Fatal(v ...interface{}) {
output(LevelFatal, v)
os.Exit(1)
}
// Criticalf logs a formatted message at the "critical" level. The
// arguments are handled in the same manner as fmt.Printf.
func Criticalf(format string, v ...interface{}) {
outputf(LevelCritical, format, v)
}
// Critical logs its arguments at the "critical" level.
func Critical(v ...interface{}) {
output(LevelCritical, v)
}
// Errorf logs a formatted message at the "error" level. The arguments
// are handled in the same manner as fmt.Printf.
func Errorf(format string, v ...interface{}) {
outputf(LevelError, format, v)
}
// Error logs its arguments at the "error" level.
func Error(v ...interface{}) {
output(LevelError, v)
}
// Warningf logs a formatted message at the "warning" level. The
// arguments are handled in the same manner as fmt.Printf.
func Warningf(format string, v ...interface{}) {
outputf(LevelWarning, format, v)
}
// Warning logs its arguments at the "warning" level.
func Warning(v ...interface{}) {
output(LevelWarning, v)
}
// Infof logs a formatted message at the "info" level. The arguments
// are handled in the same manner as fmt.Printf.
func Infof(format string, v ...interface{}) {
outputf(LevelInfo, format, v)
}
// Info logs its arguments at the "info" level.
func Info(v ...interface{}) {
output(LevelInfo, v)
}
// Debugf logs a formatted message at the "debug" level. The arguments
// are handled in the same manner as fmt.Printf.
func Debugf(format string, v ...interface{}) {
outputf(LevelDebug, format, v)
}
// Debug logs its arguments at the "debug" level.
func Debug(v ...interface{}) {
output(LevelDebug, v)
}
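A short usage sketch for the log wrapper above: raising Level suppresses everything below Warning.

package main

import "github.com/cloudflare/cfssl/log"

func main() {
	// Only Warning and above are printed once the level is raised.
	log.Level = log.LevelWarning
	log.Debug("not shown")
	log.Infof("also suppressed: %d", 42)
	log.Warningf("cert expires in %d days", 7)
	log.Error("printed at the error level")
}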

View File

@ -1,23 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["config.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/ocsp/config",
importpath = "github.com/cloudflare/cfssl/ocsp/config",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,13 +0,0 @@
// Package config in the ocsp directory provides configuration data for an OCSP
// signer.
package config
import "time"
// Config contains configuration information required to set up an OCSP signer.
type Config struct {
CACertFile string
ResponderCertFile string
KeyFile string
Interval time.Duration
}

View File

@ -1,33 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["signer.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/signer",
importpath = "github.com/cloudflare/cfssl/signer",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/cloudflare/cfssl/certdb:go_default_library",
"//vendor/github.com/cloudflare/cfssl/config:go_default_library",
"//vendor/github.com/cloudflare/cfssl/csr:go_default_library",
"//vendor/github.com/cloudflare/cfssl/errors:go_default_library",
"//vendor/github.com/cloudflare/cfssl/info:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/cloudflare/cfssl/signer/local:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,36 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["local.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/cloudflare/cfssl/signer/local",
importpath = "github.com/cloudflare/cfssl/signer/local",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/cloudflare/cfssl/certdb:go_default_library",
"//vendor/github.com/cloudflare/cfssl/config:go_default_library",
"//vendor/github.com/cloudflare/cfssl/errors:go_default_library",
"//vendor/github.com/cloudflare/cfssl/helpers:go_default_library",
"//vendor/github.com/cloudflare/cfssl/info:go_default_library",
"//vendor/github.com/cloudflare/cfssl/log:go_default_library",
"//vendor/github.com/cloudflare/cfssl/signer:go_default_library",
"//vendor/github.com/google/certificate-transparency-go:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/client:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/jsonclient:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,556 +0,0 @@
// Package local implements certificate signature functionality for CFSSL.
package local
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/hex"
"encoding/pem"
"errors"
"io"
"math/big"
"net"
"net/http"
"net/mail"
"os"
"github.com/cloudflare/cfssl/certdb"
"github.com/cloudflare/cfssl/config"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/info"
"github.com/cloudflare/cfssl/log"
"github.com/cloudflare/cfssl/signer"
"github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/client"
"github.com/google/certificate-transparency-go/jsonclient"
"golang.org/x/net/context"
)
// Signer contains a signer that uses the standard library to
// support both ECDSA and RSA CA keys.
type Signer struct {
ca *x509.Certificate
priv crypto.Signer
policy *config.Signing
sigAlgo x509.SignatureAlgorithm
dbAccessor certdb.Accessor
}
// NewSigner creates a new Signer directly from a
// private key and certificate, with optional policy.
func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) {
if policy == nil {
policy = &config.Signing{
Profiles: map[string]*config.SigningProfile{},
Default: config.DefaultConfig()}
}
if !policy.Valid() {
return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
}
return &Signer{
ca: cert,
priv: priv,
sigAlgo: sigAlgo,
policy: policy,
}, nil
}
// NewSignerFromFile generates a new local signer from a caFile
// and a caKey file, both PEM encoded.
func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) {
log.Debug("Loading CA: ", caFile)
ca, err := helpers.ReadBytes(caFile)
if err != nil {
return nil, err
}
log.Debug("Loading CA key: ", caKeyFile)
cakey, err := helpers.ReadBytes(caKeyFile)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err)
}
parsedCa, err := helpers.ParseCertificatePEM(ca)
if err != nil {
return nil, err
}
strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
password := []byte(strPassword)
if strPassword == "" {
password = nil
}
priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
if err != nil {
log.Debug("Malformed private key %v", err)
return nil, err
}
return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy)
}
func (s *Signer) sign(template *x509.Certificate) (cert []byte, err error) {
var initRoot bool
if s.ca == nil {
if !template.IsCA {
err = cferr.New(cferr.PolicyError, cferr.InvalidRequest)
return
}
template.DNSNames = nil
template.EmailAddresses = nil
s.ca = template
initRoot = true
}
derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
}
if initRoot {
s.ca, err = x509.ParseCertificate(derBytes)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
}
}
cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
log.Infof("signed certificate with serial number %d", template.SerialNumber)
return
}
// replaceSliceIfEmpty replaces the contents of replaced with newContents if
// the slice referenced by replaced is empty
func replaceSliceIfEmpty(replaced, newContents *[]string) {
if len(*replaced) == 0 {
*replaced = *newContents
}
}
// PopulateSubjectFromCSR has functionality similar to Name, except
// it fills the fields of the resulting pkix.Name with req's if the
// subject's corresponding fields are empty
func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name {
// if no subject, use req
if s == nil {
return req
}
name := s.Name()
if name.CommonName == "" {
name.CommonName = req.CommonName
}
replaceSliceIfEmpty(&name.Country, &req.Country)
replaceSliceIfEmpty(&name.Province, &req.Province)
replaceSliceIfEmpty(&name.Locality, &req.Locality)
replaceSliceIfEmpty(&name.Organization, &req.Organization)
replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit)
if name.SerialNumber == "" {
name.SerialNumber = req.SerialNumber
}
return name
}
// OverrideHosts fills template's IPAddresses, EmailAddresses, and DNSNames with the
// content of hosts, if it is not nil.
func OverrideHosts(template *x509.Certificate, hosts []string) {
if hosts != nil {
template.IPAddresses = []net.IP{}
template.EmailAddresses = []string{}
template.DNSNames = []string{}
}
for i := range hosts {
if ip := net.ParseIP(hosts[i]); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil {
template.EmailAddresses = append(template.EmailAddresses, email.Address)
} else {
template.DNSNames = append(template.DNSNames, hosts[i])
}
}
}
// Sign signs a new certificate based on the PEM-encoded client
// certificate or certificate request with the signing profile
// specified by req.Profile.
func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
profile, err := signer.Profile(s, req.Profile)
if err != nil {
return
}
block, _ := pem.Decode([]byte(req.Request))
if block == nil {
return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
}
if block.Type != "NEW CERTIFICATE REQUEST" && block.Type != "CERTIFICATE REQUEST" {
return nil, cferr.Wrap(cferr.CSRError,
cferr.BadRequest, errors.New("not a csr"))
}
csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes)
if err != nil {
return nil, err
}
// Copy out only the fields from the CSR authorized by policy.
safeTemplate := x509.Certificate{}
// If the profile contains no explicit whitelist, assume that all fields
// should be copied from the CSR.
if profile.CSRWhitelist == nil {
safeTemplate = *csrTemplate
} else {
if profile.CSRWhitelist.Subject {
safeTemplate.Subject = csrTemplate.Subject
}
if profile.CSRWhitelist.PublicKeyAlgorithm {
safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm
}
if profile.CSRWhitelist.PublicKey {
safeTemplate.PublicKey = csrTemplate.PublicKey
}
if profile.CSRWhitelist.SignatureAlgorithm {
safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm
}
if profile.CSRWhitelist.DNSNames {
safeTemplate.DNSNames = csrTemplate.DNSNames
}
if profile.CSRWhitelist.IPAddresses {
safeTemplate.IPAddresses = csrTemplate.IPAddresses
}
if profile.CSRWhitelist.EmailAddresses {
safeTemplate.EmailAddresses = csrTemplate.EmailAddresses
}
}
if req.CRLOverride != "" {
safeTemplate.CRLDistributionPoints = []string{req.CRLOverride}
}
if safeTemplate.IsCA {
if !profile.CAConstraint.IsCA {
log.Error("local signer policy disallows issuing CA certificate")
return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest)
}
if s.ca != nil && s.ca.MaxPathLen > 0 {
if safeTemplate.MaxPathLen >= s.ca.MaxPathLen {
log.Error("local signer certificate disallows CA MaxPathLen extending")
// do not sign a cert with pathlen > current
return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest)
}
} else if s.ca != nil && s.ca.MaxPathLen == 0 && s.ca.MaxPathLenZero {
log.Error("local signer certificate disallows issuing CA certificate")
// signer has pathlen of 0, do not sign more intermediate CAs
return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest)
}
}
OverrideHosts(&safeTemplate, req.Hosts)
safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject)
// If there is a whitelist, ensure that both the Common Name and SAN DNSNames match
if profile.NameWhitelist != nil {
if safeTemplate.Subject.CommonName != "" {
if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) == nil {
return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist)
}
}
for _, name := range safeTemplate.DNSNames {
if profile.NameWhitelist.Find([]byte(name)) == nil {
return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist)
}
}
for _, name := range safeTemplate.EmailAddresses {
if profile.NameWhitelist.Find([]byte(name)) == nil {
return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist)
}
}
}
if profile.ClientProvidesSerialNumbers {
if req.Serial == nil {
return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial)
}
safeTemplate.SerialNumber = req.Serial
} else {
// RFC 5280 4.1.2.2:
// Certificate users MUST be able to handle serialNumber
// values up to 20 octets. Conforming CAs MUST NOT use
// serialNumber values longer than 20 octets.
//
// If CFSSL is providing the serial numbers, it makes
// sense to use the max supported size.
serialNumber := make([]byte, 20)
_, err = io.ReadFull(rand.Reader, serialNumber)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
}
// SetBytes interprets buf as the bytes of a big-endian
// unsigned integer. The leading byte should be masked
// off to ensure it isn't negative.
serialNumber[0] &= 0x7F
safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber)
}
if len(req.Extensions) > 0 {
for _, ext := range req.Extensions {
oid := asn1.ObjectIdentifier(ext.ID)
if !profile.ExtensionWhitelist[oid.String()] {
return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest)
}
rawValue, err := hex.DecodeString(ext.Value)
if err != nil {
return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err)
}
safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{
Id: oid,
Critical: ext.Critical,
Value: rawValue,
})
}
}
var distPoints = safeTemplate.CRLDistributionPoints
err = signer.FillTemplate(&safeTemplate, s.policy.Default, profile, req.NotBefore, req.NotAfter)
if err != nil {
return nil, err
}
if len(distPoints) > 0 {
safeTemplate.CRLDistributionPoints = distPoints
}
var certTBS = safeTemplate
if len(profile.CTLogServers) > 0 || req.ReturnPrecert {
// Add a poison extension which prevents validation
var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}}
var poisonedPreCert = certTBS
poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension)
cert, err = s.sign(&poisonedPreCert)
if err != nil {
return
}
if req.ReturnPrecert {
return cert, nil
}
derCert, _ := pem.Decode(cert)
prechain := []ct.ASN1Cert{{Data: derCert.Bytes}, {Data: s.ca.Raw}}
var sctList []ct.SignedCertificateTimestamp
for _, server := range profile.CTLogServers {
log.Infof("submitting poisoned precertificate to %s", server)
ctclient, err := client.New(server, nil, jsonclient.Options{})
if err != nil {
return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err)
}
var resp *ct.SignedCertificateTimestamp
ctx := context.Background()
resp, err = ctclient.AddPreChain(ctx, prechain)
if err != nil {
return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err)
}
sctList = append(sctList, *resp)
}
var serializedSCTList []byte
serializedSCTList, err = helpers.SerializeSCTList(sctList)
if err != nil {
return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
}
// Serialize again as an octet string before embedding
serializedSCTList, err = asn1.Marshal(serializedSCTList)
if err != nil {
return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
}
var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList}
certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension)
}
var signedCert []byte
signedCert, err = s.sign(&certTBS)
if err != nil {
return nil, err
}
// Get the AKI from signedCert. This is required to support Go 1.9+.
// In prior versions of Go, x509.CreateCertificate updated the
// AuthorityKeyId of certTBS.
parsedCert, _ := helpers.ParseCertificatePEM(signedCert)
if s.dbAccessor != nil {
var certRecord = certdb.CertificateRecord{
Serial: certTBS.SerialNumber.String(),
// this relies on the specific behavior of x509.CreateCertificate
// which sets the AuthorityKeyId from the signer's SubjectKeyId
AKI: hex.EncodeToString(parsedCert.AuthorityKeyId),
CALabel: req.Label,
Status: "good",
Expiry: certTBS.NotAfter,
PEM: string(signedCert),
}
err = s.dbAccessor.InsertCertificate(certRecord)
if err != nil {
return nil, err
}
log.Debug("saved certificate with serial number ", certTBS.SerialNumber)
}
return signedCert, nil
}
// SignFromPrecert creates and signs a certificate from an existing precertificate
// that was previously signed by Signer.ca and inserts the provided SCTs into the
// new certificate. The resulting certificate will be an exact copy of the precert
// except for the removal of the poison extension and the addition of the SCT list
// extension. SignFromPrecert does not verify that the contents of the certificate
// still match the signing profile of the signer; it only requires that the precert
// was previously signed by the Signer's CA.
func (s *Signer) SignFromPrecert(precert *x509.Certificate, scts []ct.SignedCertificateTimestamp) ([]byte, error) {
// Verify certificate was signed by s.ca
if err := precert.CheckSignatureFrom(s.ca); err != nil {
return nil, err
}
// Verify certificate is a precert
isPrecert := false
poisonIndex := 0
for i, ext := range precert.Extensions {
if ext.Id.Equal(signer.CTPoisonOID) {
if !ext.Critical {
return nil, cferr.New(cferr.CTError, cferr.PrecertInvalidPoison)
}
// Check extension contains ASN.1 NULL
if !bytes.Equal(ext.Value, []byte{0x05, 0x00}) {
return nil, cferr.New(cferr.CTError, cferr.PrecertInvalidPoison)
}
isPrecert = true
poisonIndex = i
break
}
}
if !isPrecert {
return nil, cferr.New(cferr.CTError, cferr.PrecertMissingPoison)
}
// Serialize SCTs into list format and create extension
serializedList, err := helpers.SerializeSCTList(scts)
if err != nil {
return nil, err
}
// Serialize again as an octet string before embedding
serializedList, err = asn1.Marshal(serializedList)
if err != nil {
return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
}
sctExt := pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedList}
// Create the new tbsCert from precert. Do explicit copies of any slices so that we don't
// use memory that may be altered by us or the caller at a later stage.
tbsCert := x509.Certificate{
SignatureAlgorithm: precert.SignatureAlgorithm,
PublicKeyAlgorithm: precert.PublicKeyAlgorithm,
PublicKey: precert.PublicKey,
Version: precert.Version,
SerialNumber: precert.SerialNumber,
Issuer: precert.Issuer,
Subject: precert.Subject,
NotBefore: precert.NotBefore,
NotAfter: precert.NotAfter,
KeyUsage: precert.KeyUsage,
BasicConstraintsValid: precert.BasicConstraintsValid,
IsCA: precert.IsCA,
MaxPathLen: precert.MaxPathLen,
MaxPathLenZero: precert.MaxPathLenZero,
PermittedDNSDomainsCritical: precert.PermittedDNSDomainsCritical,
}
if len(precert.Extensions) > 0 {
tbsCert.ExtraExtensions = make([]pkix.Extension, len(precert.Extensions))
copy(tbsCert.ExtraExtensions, precert.Extensions)
}
// Remove the poison extension from ExtraExtensions
tbsCert.ExtraExtensions = append(tbsCert.ExtraExtensions[:poisonIndex], tbsCert.ExtraExtensions[poisonIndex+1:]...)
// Insert the SCT list extension
tbsCert.ExtraExtensions = append(tbsCert.ExtraExtensions, sctExt)
// Sign the tbsCert
return s.sign(&tbsCert)
}
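A hedged fragment showing how this two-step flow fits together with the Sign method above; `s` is an existing local Signer, `csrPEM` is a placeholder, and the CT log submission that produces the SCTs is elided:

```go
// Illustrative fragment: request a poisoned precertificate, gather SCTs
// from CT logs out of band, then embed them with SignFromPrecert.
precertPEM, err := s.Sign(signer.SignRequest{
	Request:       string(csrPEM), // placeholder PEM-encoded CSR
	ReturnPrecert: true,           // skip CT submission inside Sign
})
if err != nil {
	return nil, err
}
// helpers.ParseCertificatePEM is assumed to return (*x509.Certificate, error),
// matching its use in Sign above.
precert, err := helpers.ParseCertificatePEM(precertPEM)
if err != nil {
	return nil, err
}
var scts []ct.SignedCertificateTimestamp // collected from the logs of interest
finalPEM, err := s.SignFromPrecert(precert, scts)
if err != nil {
	return nil, err
}
_ = finalPEM
```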
// Info returns a populated info.Resp struct or an error.
func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) {
cert, err := s.Certificate(req.Label, req.Profile)
if err != nil {
return
}
profile, err := signer.Profile(s, req.Profile)
if err != nil {
return
}
resp = new(info.Resp)
if cert.Raw != nil {
resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})))
}
resp.Usage = profile.Usage
resp.ExpiryString = profile.ExpiryString
return
}
// SigAlgo returns the RSA signer's signature algorithm.
func (s *Signer) SigAlgo() x509.SignatureAlgorithm {
return s.sigAlgo
}
// Certificate returns the signer's certificate.
func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) {
cert := *s.ca
return &cert, nil
}
// SetPolicy sets the signer's signature policy.
func (s *Signer) SetPolicy(policy *config.Signing) {
s.policy = policy
}
// SetDBAccessor sets the signer's cert db accessor
func (s *Signer) SetDBAccessor(dba certdb.Accessor) {
s.dbAccessor = dba
}
// GetDBAccessor returns the signer's cert db accessor
func (s *Signer) GetDBAccessor() certdb.Accessor {
return s.dbAccessor
}
// SetReqModifier does nothing for local
func (s *Signer) SetReqModifier(func(*http.Request, []byte)) {
// noop
}
// Policy returns the signer's policy.
func (s *Signer) Policy() *config.Signing {
return s.policy
}
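For orientation, a hedged, minimal sketch of driving this local signer from application code follows. `local.NewSignerFromFile` and `config.DefaultConfig` are recalled from cfssl's public API rather than taken from the files shown in this diff, and the file paths and hostnames are placeholders.

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/cloudflare/cfssl/config"
	"github.com/cloudflare/cfssl/signer"
	"github.com/cloudflare/cfssl/signer/local"
)

func main() {
	// Assumed constructor and default-policy helper from cfssl's public API;
	// the CA, key and CSR paths are placeholders.
	policy := &config.Signing{Default: config.DefaultConfig()}
	s, err := local.NewSignerFromFile("ca.pem", "ca-key.pem", policy)
	if err != nil {
		panic(err)
	}
	csrPEM, err := ioutil.ReadFile("server.csr") // PEM "CERTIFICATE REQUEST"
	if err != nil {
		panic(err)
	}
	certPEM, err := s.Sign(signer.SignRequest{
		Hosts:   []string{"example.com"}, // SANs applied via OverrideHosts
		Request: string(csrPEM),
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(certPEM))
}
```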

View File

@ -1,436 +0,0 @@
// Package signer implements certificate signature functionality for CFSSL.
package signer
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha1"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"math/big"
"net/http"
"strings"
"time"
"github.com/cloudflare/cfssl/certdb"
"github.com/cloudflare/cfssl/config"
"github.com/cloudflare/cfssl/csr"
cferr "github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/info"
)
// Subject contains the information that should be used to override the
// subject information when signing a certificate.
type Subject struct {
CN string
Names []csr.Name `json:"names"`
SerialNumber string
}
// Extension represents a raw extension to be included in the certificate. The
// "value" field must be hex encoded.
type Extension struct {
ID config.OID `json:"id"`
Critical bool `json:"critical"`
Value string `json:"value"`
}
// SignRequest stores a signature request, which contains the hostname,
// the CSR, optional subject information, and the signature profile.
//
// Extensions provided in the signRequest are copied into the certificate, as
// long as they are in the ExtensionWhitelist for the signer's policy.
// Extensions requested in the CSR are ignored, except for those processed by
// ParseCertificateRequest (mainly subjectAltName).
type SignRequest struct {
Hosts []string `json:"hosts"`
Request string `json:"certificate_request"`
Subject *Subject `json:"subject,omitempty"`
Profile string `json:"profile"`
CRLOverride string `json:"crl_override"`
Label string `json:"label"`
Serial *big.Int `json:"serial,omitempty"`
Extensions []Extension `json:"extensions,omitempty"`
// If provided, NotBefore will be used without modification (except
// for canonicalization) as the value of the notBefore field of the
// certificate. In particular no backdating adjustment will be made
// when NotBefore is provided.
NotBefore time.Time
// If provided, NotAfter will be used without modification (except
// for canonicalization) as the value of the notAfter field of the
// certificate.
NotAfter time.Time
// If ReturnPrecert is true a certificate with the CT poison extension
// will be returned from the Signer instead of attempting to retrieve
// SCTs and populate the tbsCert with them itself. This precert can then
// be passed to SignFromPrecert with the SCTs in order to create a
// valid certificate.
ReturnPrecert bool
}
// appendIf appends s to *a if s is not an empty string.
func appendIf(s string, a *[]string) {
if s != "" {
*a = append(*a, s)
}
}
// Name returns the PKIX name for the subject.
func (s *Subject) Name() pkix.Name {
var name pkix.Name
name.CommonName = s.CN
for _, n := range s.Names {
appendIf(n.C, &name.Country)
appendIf(n.ST, &name.Province)
appendIf(n.L, &name.Locality)
appendIf(n.O, &name.Organization)
appendIf(n.OU, &name.OrganizationalUnit)
}
name.SerialNumber = s.SerialNumber
return name
}
// SplitHosts takes a comma-separated list of hosts and returns a slice
// with the hosts split
func SplitHosts(hostList string) []string {
if hostList == "" {
return nil
}
return strings.Split(hostList, ",")
}
// A Signer contains a CA's certificate and private key for signing
// certificates, a Signing policy to refer to and a SignatureAlgorithm.
type Signer interface {
Info(info.Req) (*info.Resp, error)
Policy() *config.Signing
SetDBAccessor(certdb.Accessor)
GetDBAccessor() certdb.Accessor
SetPolicy(*config.Signing)
SigAlgo() x509.SignatureAlgorithm
Sign(req SignRequest) (cert []byte, err error)
SetReqModifier(func(*http.Request, []byte))
}
// Profile gets the specific profile from the signer
func Profile(s Signer, profile string) (*config.SigningProfile, error) {
var p *config.SigningProfile
policy := s.Policy()
if policy != nil && policy.Profiles != nil && profile != "" {
p = policy.Profiles[profile]
}
if p == nil && policy != nil {
p = policy.Default
}
if p == nil {
return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil"))
}
return p, nil
}
// DefaultSigAlgo returns an appropriate X.509 signature algorithm given
// the CA's private key.
func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
pub := priv.Public()
switch pub := pub.(type) {
case *rsa.PublicKey:
keySize := pub.N.BitLen()
switch {
case keySize >= 4096:
return x509.SHA512WithRSA
case keySize >= 3072:
return x509.SHA384WithRSA
case keySize >= 2048:
return x509.SHA256WithRSA
default:
return x509.SHA1WithRSA
}
case *ecdsa.PublicKey:
switch pub.Curve {
case elliptic.P256():
return x509.ECDSAWithSHA256
case elliptic.P384():
return x509.ECDSAWithSHA384
case elliptic.P521():
return x509.ECDSAWithSHA512
default:
return x509.ECDSAWithSHA1
}
default:
return x509.UnknownSignatureAlgorithm
}
}
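As a hedged illustration of the mapping above (not part of the original file), a 3072-bit RSA key and a P-384 ECDSA key both land on SHA-384 based algorithms:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/cloudflare/cfssl/signer"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 3072)
	if err != nil {
		panic(err)
	}
	fmt.Println(signer.DefaultSigAlgo(rsaKey)) // x509.SHA384WithRSA (>= 3072-bit key)

	ecKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Println(signer.DefaultSigAlgo(ecKey)) // x509.ECDSAWithSHA384 (P-384 curve)
}
```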
// ParseCertificateRequest takes an incoming certificate request and
// builds a certificate template from it.
func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) {
csrv, err := x509.ParseCertificateRequest(csrBytes)
if err != nil {
err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
return
}
err = csrv.CheckSignature()
if err != nil {
err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err)
return
}
template = &x509.Certificate{
Subject: csrv.Subject,
PublicKeyAlgorithm: csrv.PublicKeyAlgorithm,
PublicKey: csrv.PublicKey,
SignatureAlgorithm: s.SigAlgo(),
DNSNames: csrv.DNSNames,
IPAddresses: csrv.IPAddresses,
EmailAddresses: csrv.EmailAddresses,
}
for _, val := range csrv.Extensions {
// Check the CSR for the X.509 BasicConstraints (RFC 5280, 4.2.1.9)
// extension and append to template if necessary
if val.Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 19}) {
var constraints csr.BasicConstraints
var rest []byte
if rest, err = asn1.Unmarshal(val.Value, &constraints); err != nil {
return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
} else if len(rest) != 0 {
return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, errors.New("x509: trailing data after X.509 BasicConstraints"))
}
template.BasicConstraintsValid = true
template.IsCA = constraints.IsCA
template.MaxPathLen = constraints.MaxPathLen
template.MaxPathLenZero = template.MaxPathLen == 0
}
}
return
}
type subjectPublicKeyInfo struct {
Algorithm pkix.AlgorithmIdentifier
SubjectPublicKey asn1.BitString
}
// ComputeSKI derives an SKI from the certificate's public key in a
// standard manner. This is done by computing the SHA-1 digest of the
// subjectPublicKey BIT STRING within the certificate's SubjectPublicKeyInfo.
func ComputeSKI(template *x509.Certificate) ([]byte, error) {
pub := template.PublicKey
encodedPub, err := x509.MarshalPKIXPublicKey(pub)
if err != nil {
return nil, err
}
var subPKI subjectPublicKeyInfo
_, err = asn1.Unmarshal(encodedPub, &subPKI)
if err != nil {
return nil, err
}
pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes)
return pubHash[:], nil
}
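A hedged, self-contained sketch of calling ComputeSKI from outside the package; the ECDSA key generation and hex printing are illustrative and not part of cfssl:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/hex"
	"fmt"

	"github.com/cloudflare/cfssl/signer"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// ComputeSKI only consults template.PublicKey.
	ski, err := signer.ComputeSKI(&x509.Certificate{PublicKey: key.Public()})
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(ski)) // 20-byte SHA-1 of the subjectPublicKey
}
```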
// FillTemplate is a utility function that tries to load as much of
// the certificate template as possible from the profiles and current
// template. It fills in the key uses, expiration, revocation URLs
// and SKI.
func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile, notBefore time.Time, notAfter time.Time) error {
ski, err := ComputeSKI(template)
if err != nil {
return err
}
var (
eku []x509.ExtKeyUsage
ku x509.KeyUsage
backdate time.Duration
expiry time.Duration
crlURL, ocspURL string
issuerURL = profile.IssuerURL
)
// The third value returned from Usages is a list of unknown key usages.
// This should be used when validating the profile at load, and isn't used
// here.
ku, eku, _ = profile.Usages()
if profile.IssuerURL == nil {
issuerURL = defaultProfile.IssuerURL
}
if ku == 0 && len(eku) == 0 {
return cferr.New(cferr.PolicyError, cferr.NoKeyUsages)
}
if expiry = profile.Expiry; expiry == 0 {
expiry = defaultProfile.Expiry
}
if crlURL = profile.CRL; crlURL == "" {
crlURL = defaultProfile.CRL
}
if ocspURL = profile.OCSP; ocspURL == "" {
ocspURL = defaultProfile.OCSP
}
if notBefore.IsZero() {
if !profile.NotBefore.IsZero() {
notBefore = profile.NotBefore
} else {
if backdate = profile.Backdate; backdate == 0 {
backdate = -5 * time.Minute
} else {
backdate = -1 * profile.Backdate
}
notBefore = time.Now().Round(time.Minute).Add(backdate)
}
}
notBefore = notBefore.UTC()
if notAfter.IsZero() {
if !profile.NotAfter.IsZero() {
notAfter = profile.NotAfter
} else {
notAfter = notBefore.Add(expiry)
}
}
notAfter = notAfter.UTC()
template.NotBefore = notBefore
template.NotAfter = notAfter
template.KeyUsage = ku
template.ExtKeyUsage = eku
template.BasicConstraintsValid = true
template.IsCA = profile.CAConstraint.IsCA
if template.IsCA {
template.MaxPathLen = profile.CAConstraint.MaxPathLen
if template.MaxPathLen == 0 {
template.MaxPathLenZero = profile.CAConstraint.MaxPathLenZero
}
template.DNSNames = nil
template.EmailAddresses = nil
}
template.SubjectKeyId = ski
if ocspURL != "" {
template.OCSPServer = []string{ocspURL}
}
if crlURL != "" {
template.CRLDistributionPoints = []string{crlURL}
}
if len(issuerURL) != 0 {
template.IssuingCertificateURL = issuerURL
}
if len(profile.Policies) != 0 {
err = addPolicies(template, profile.Policies)
if err != nil {
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
}
}
if profile.OCSPNoCheck {
ocspNoCheckExtension := pkix.Extension{
Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5},
Critical: false,
Value: []byte{0x05, 0x00},
}
template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension)
}
return nil
}
type policyInformation struct {
PolicyIdentifier asn1.ObjectIdentifier
Qualifiers []interface{} `asn1:"tag:optional,omitempty"`
}
type cpsPolicyQualifier struct {
PolicyQualifierID asn1.ObjectIdentifier
Qualifier string `asn1:"tag:optional,ia5"`
}
type userNotice struct {
ExplicitText string `asn1:"tag:optional,utf8"`
}
type userNoticePolicyQualifier struct {
PolicyQualifierID asn1.ObjectIdentifier
Qualifier userNotice
}
var (
// Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents:
// iso(1) identified-organization(3) dod(6) internet(1) security(5)
// mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1)
iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}
// iso(1) identified-organization(3) dod(6) internet(1) security(5)
// mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2)
iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}
// CTPoisonOID is the object ID of the critical poison extension for precertificates
// https://tools.ietf.org/html/rfc6962#page-9
CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
// SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension
// https://tools.ietf.org/html/rfc6962#page-14
SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
)
// addPolicies adds Certificate Policies and optional Policy Qualifiers to a
// certificate, based on the input config. Go's x509 library allows setting
// Certificate Policies easily, but does not support nested Policy Qualifiers
// under those policies. So we need to construct the ASN.1 structure ourselves.
func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error {
asn1PolicyList := []policyInformation{}
for _, policy := range policies {
pi := policyInformation{
// The PolicyIdentifier is an OID assigned to a given issuer.
PolicyIdentifier: asn1.ObjectIdentifier(policy.ID),
}
for _, qualifier := range policy.Qualifiers {
switch qualifier.Type {
case "id-qt-unotice":
pi.Qualifiers = append(pi.Qualifiers,
userNoticePolicyQualifier{
PolicyQualifierID: iDQTUserNotice,
Qualifier: userNotice{
ExplicitText: qualifier.Value,
},
})
case "id-qt-cps":
pi.Qualifiers = append(pi.Qualifiers,
cpsPolicyQualifier{
PolicyQualifierID: iDQTCertificationPracticeStatement,
Qualifier: qualifier.Value,
})
default:
return errors.New("Invalid qualifier type in Policies " + qualifier.Type)
}
}
asn1PolicyList = append(asn1PolicyList, pi)
}
asn1Bytes, err := asn1.Marshal(asn1PolicyList)
if err != nil {
return err
}
template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{
Id: asn1.ObjectIdentifier{2, 5, 29, 32},
Critical: false,
Value: asn1Bytes,
})
return nil
}
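To make the comment above concrete, a hedged in-package sketch of the value addPolicies marshals into the certificatePolicies (2.5.29.32) extension; the policy OID and CPS URI are placeholders, and this only compiles inside this package because the helper types are unexported:

```go
// In-package sketch of the structure addPolicies builds; OID and URI are placeholders.
pi := policyInformation{
	PolicyIdentifier: asn1.ObjectIdentifier{2, 23, 140, 1, 2, 1},
	Qualifiers: []interface{}{
		cpsPolicyQualifier{
			PolicyQualifierID: iDQTCertificationPracticeStatement,
			Qualifier:         "https://example.com/cps",
		},
	},
}
der, err := asn1.Marshal([]policyInformation{pi})
if err != nil {
	return err
}
// The DER bytes become the value of the certificatePolicies (2.5.29.32) extension.
ext := pkix.Extension{Id: asn1.ObjectIdentifier{2, 5, 29, 32}, Critical: false, Value: der}
_ = ext
```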

View File

@ -1,29 +0,0 @@
*.iml
*.swo
*.swp
*.tfstate
*.tfstate.backup
*~
/.idea
/certcheck
/chainfix
/coverage.txt
/createtree
/crlcheck
/ctclient
/ct_server
/ct_hammer
/data
/dumpscts
/etcdiscover
/findlog
/goshawk
/gosmin
/gossip_server
/preloader
/scanlog
/sctcheck
/sctscan
/trillian_log_server
/trillian_log_signer
/trillian.json

View File

@ -1,87 +0,0 @@
sudo: true # required for CI push into Kubernetes.
language: go
os: linux
go: "1.10"
go_import_path: github.com/google/certificate-transparency-go
env:
- GCE_CI=${ENABLE_GCE_CI} GOFLAGS=
- GOFLAGS=-race
- GOFLAGS= WITH_ETCD=true WITH_COVERAGE=true
- GOFLAGS=-race WITH_ETCD=true
matrix:
fast_finish: true
addons:
apt:
sources:
- mysql-5.7-trusty
packages:
- mysql-server
- mysql-client
services:
- docker
before_install:
- sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
- sudo mysql_upgrade
- sudo service mysql restart
install:
- mkdir ../protoc
- |
(
cd ../protoc
wget https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
unzip protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
)
- export PATH=$(pwd)/../protoc/bin:$PATH
- go get -d -t ./...
- go get github.com/alecthomas/gometalinter
- gometalinter --install
- go get -u github.com/golang/protobuf/proto
- go get -u github.com/golang/protobuf/protoc-gen-go
- go install github.com/golang/mock/mockgen
# install vendored etcd binary
- go install ./vendor/github.com/coreos/etcd/cmd/etcd
- go install ./vendor/github.com/coreos/etcd/cmd/etcdctl
- pushd ${GOPATH}/src/github.com/google/trillian
- go get -d -t ./...
- popd
script:
- set -e
- cd $HOME/gopath/src/github.com/google/certificate-transparency-go
- ./scripts/presubmit.sh ${PRESUBMIT_OPTS} ${WITH_COVERAGE:+--coverage}
- |
# Check re-generation didn't change anything
status=$(git status --porcelain | grep -v coverage) || :
if [[ -n ${status} ]]; then
echo "Regenerated files differ from checked-in versions: ${status}"
git status
git diff
exit 1
fi
- |
if [[ "${WITH_ETCD}" == "true" ]]; then
export ETCD_DIR="${GOPATH}/bin"
fi
- ./trillian/integration/integration_test.sh
- HAMMER_OPTS="--operations=1500" ./trillian/integration/ct_hammer_test.sh
- set +e
after_success:
- cp /tmp/coverage.txt .
- bash <(curl -s https://codecov.io/bash)
- |
# Push up to GCE CI instance if we're running after a merge to master
if [[ "${GCE_CI}" == "true" ]] && [[ $TRAVIS_PULL_REQUEST == "false" ]] && [[ $TRAVIS_BRANCH == "master" ]]; then
. scripts/install_cloud.sh
echo ${GCLOUD_SERVICE_KEY_CI} | base64 --decode -i > ${HOME}/gcloud-service-key.json
gcloud auth activate-service-account --key-file ${HOME}/gcloud-service-key.json
rm ${HOME}/gcloud-service-key.json
. scripts/deploy_gce_ci.sh
fi

View File

@ -1,27 +0,0 @@
# This is the official list of benchmark authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.
Comodo CA Limited
Ed Maste <emaste@freebsd.org>
Fiaz Hossain <fiaz.hossain@salesforce.com>
Google Inc.
Internet Security Research Group
Jeff Trawick <trawick@gmail.com>
Katriel Cohn-Gordon <katriel.cohn-gordon@cybersecurity.ox.ac.uk>
Laël Cellier <lael.cellier@gmail.com>
Mark Schloesser <ms@mwcollect.org>
NORDUnet A/S
Nicholas Galbreath <nickg@client9.com>
Oliver Weidner <Oliver.Weidner@gmail.com>
PrimeKey Solutions AB
Ruslan Kovalov <ruslan.kovalyov@gmail.com>
Venafi, Inc.
Vladimir Rutsky <vladimir@rutsky.org>
Ximin Luo <infinity0@gmx.com>

View File

@ -1,38 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"serialization.go",
"signatures.go",
"types.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go",
importpath = "github.com/google/certificate-transparency-go",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/google/certificate-transparency-go/tls:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/google/certificate-transparency-go/asn1:all-srcs",
"//vendor/github.com/google/certificate-transparency-go/client:all-srcs",
"//vendor/github.com/google/certificate-transparency-go/jsonclient:all-srcs",
"//vendor/github.com/google/certificate-transparency-go/tls:all-srcs",
"//vendor/github.com/google/certificate-transparency-go/x509:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,208 +0,0 @@
# CERTIFICATE-TRANSPARENCY-GO Changelog
## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements
Published 2018-07-05 09:21:34 +0000 UTC
Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`.
The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`.
An implementation of Gossip has been added. See the `gossip/minimal` package for more information.
An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10.
Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20)
## v1.0.19 - CTFE User Quota
Published 2018-06-01 13:51:52 +0000 UTC
CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains.
Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19)
## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config
Published 2018-06-01 14:28:20 +0000 UTC
Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package.
Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
## v1.0.17 - Merkle verification / Tracing / Demo script / CORS
Published 2018-06-01 14:25:16 +0000 UTC
Now uses Merkle Tree verification from Trillian.
The CT server now supports CORS.
Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
## v1.0.16 - Lifecycle test / Go 1.10.1
Published 2018-06-01 14:22:23 +0000 UTC
An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
Changes to `x509` were merged from Go 1.10.1.
Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
## v1.0.15 - More control of verification, grpclb, stackdriver metrics
Published 2018-06-01 14:20:32 +0000 UTC
Facilities were added to the `x509` package to control whether verification checks are applied.
Log server requests are now balanced using `gRPClb`.
For Kubernetes, metrics can be published to Stackdriver monitoring.
Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
## v1.0.14 - SQLite Removed, LeafHashForLeaf
Published 2018-06-01 14:15:37 +0000 UTC
Support for SQLite was removed. The motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests.
Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification
Published 2018-06-01 14:15:21 +0000 UTC
Some of our custom crypto packages that were wrapping calls to the standard package have been removed, and the base features are now used directly.
Updates were made to GCE ingress and health checks.
The log list utility can verify signatures.
Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
## v1.0.12 - Client / util updates & CTFE fixes
Published 2018-06-01 14:13:42 +0000 UTC
The CT client can now use a JSON loglist to find logs.
CTFE had a fix applied for preissued precerts.
A DNS client was added and CT client was extended to support DNS retrieval.
Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
## v1.0.11 - Kubernetes CI / Integration fixes
Published 2018-06-01 14:12:18 +0000 UTC
Updates to Kubernetes configs, mostly related to running a CI instance.
Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11)
## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates
Published 2018-06-01 14:09:47 +0000 UTC
The CT client was using the wrong protobuffer library package. To guard against this in the future, a check has been added to our lint config.
The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
## v1.0.9 - Scanner, x509, utility and client fixes
Published 2018-06-01 14:11:13 +0000 UTC
The `scanner` utility now displays throughput stats.
Build instructions and README files were updated.
The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
## v1.0.8 - Client fixes, align with trillian repo
Published 2018-06-01 14:06:44 +0000 UTC
Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8)
## v1.0.7 - CTFE fixes
Published 2018-06-01 14:06:13 +0000 UTC
An issue was fixed with CTFE signature caching. In an unlikely set of circumstances this could lead to log mis-operation. While the chances of this are small, we recommend that versions prior to this one are not deployed.
Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7)
## v1.0.6 - crlcheck improvements / other fixes
Published 2018-06-01 14:04:22 +0000 UTC
The `crlcheck` utility has had several fixes and enhancements. Additionally the `hammer` now supports temporal logs.
Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6)
## v1.0.5 - X509 and asn1 fixes
Published 2018-06-01 14:02:58 +0000 UTC
This release is mostly fixes to the `x509` and `asn1` packages. Some command-line utilities were also updated.
Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5)
## v1.0.4 - Multi log backend configs
Published 2018-06-01 14:02:07 +0000 UTC
Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. It allows for e.g. regional backend deployment with common frontend servers.
Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4)
## v1.0.3 - Hammer updates, use standard context
Published 2018-06-01 14:01:11 +0000 UTC
After the Go 1.9 migration, references to anything other than the standard `context` package have been removed. This is the only context package that should be used from now on.
Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3)
## v1.0.2 - Go 1.9
Published 2018-06-01 14:00:00 +0000 UTC
Go 1.9 is now required to build the code.
Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2)
## v1.0.1 - Hammer and client improvements
Published 2018-06-01 13:59:29 +0000 UTC
Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1)
## v1.0 - First Trillian CT Release
Published 2018-06-01 13:59:00 +0000 UTC
This is the point that corresponds to the 1.0 release in the trillian repo.
Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)

View File

@ -1,58 +0,0 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**; it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and set up a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request

View File

@ -1,57 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
# Name <email address>
#
# Please keep the list sorted.
Adam Eijdenberg <eijdenberg@google.com> <adam.eijdenberg@gmail.com>
Al Cutter <al@google.com>
Ben Laurie <benl@google.com> <ben@links.org>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
David Drysdale <drysdale@google.com>
Deyan Bektchiev <deyan.bektchiev@venafi.com> <deyan@bektchiev.net>
Ed Maste <emaste@freebsd.org>
Emilia Kasper <ekasper@google.com>
Eran Messeri <eranm@google.com> <eran.mes@gmail.com>
Fiaz Hossain <fiaz.hossain@salesforce.com>
Gary Belvin <gbelvin@google.com> <gdbelvin@gmail.com>
Jeff Trawick <trawick@gmail.com>
Joe Tsai <joetsai@digital-static.net>
Kat Joyce <katjoyce@google.com>
Katriel Cohn-Gordon <katriel.cohn-gordon@cybersecurity.ox.ac.uk>
Kiril Nikolov <kiril.nikolov@venafi.com>
Konrad Kraszewski <kraszewski@google.com> <laiquendir@gmail.com>
Laël Cellier <lael.cellier@gmail.com>
Linus Nordberg <linus@nordu.net>
Mark Schloesser <ms@mwcollect.org>
Nicholas Galbreath <nickg@client9.com>
Oliver Weidner <Oliver.Weidner@gmail.com>
Pascal Leroy <phl@google.com>
Paul Hadfield <hadfieldp@google.com> <paul@phad.org.uk>
Paul Lietar <lietar@google.com>
Pierre Phaneuf <pphaneuf@google.com>
Rob Percival <robpercival@google.com>
Rob Stradling <rob@comodo.com>
Roland Shoemaker <roland@letsencrypt.org>
Ruslan Kovalov <ruslan.kovalyov@gmail.com>
Samuel Lidén Borell <samuel@kodafritt.se>
Vladimir Rutsky <vladimir@rutsky.org>
Ximin Luo <infinity0@gmx.com>

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,144 +0,0 @@
# Certificate Transparency: Go Code
[![Build Status](https://travis-ci.org/google/certificate-transparency-go.svg?branch=master)](https://travis-ci.org/google/certificate-transparency-go)
[![Go Report Card](https://goreportcard.com/badge/github.com/google/certificate-transparency-go)](https://goreportcard.com/report/github.com/google/certificate-transparency-go)
[![GoDoc](https://godoc.org/github.com/google/certificate-transparency-go?status.svg)](https://godoc.org/github.com/google/certificate-transparency-go)
This repository holds Go code related to
[Certificate Transparency](https://www.certificate-transparency.org/) (CT). The
repository requires Go version 1.9.
- [Repository Structure](#repository-structure)
- [Trillian CT Personality](#trillian-ct-personality)
- [Working on the Code](#working-on-the-code)
- [Rebuilding Generated Code](#rebuilding-generated-code)
- [Updating Vendor Code](#updating-vendor-code)
- [Running Codebase Checks](#running-codebase-checks)
## Repository Structure
The main parts of the repository are:
- Encoding libraries:
- `asn1/` and `x509/` are forks of the upstream Go `encoding/asn1` and
`crypto/x509` libraries. We maintain separate forks of these packages
because CT is intended to act as an observatory of certificates across the
ecosystem; as such, we need to be able to process somewhat-malformed
certificates that the stricter upstream code would (correctly) reject.
Our `x509` fork also includes code for working with the
[pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1).
- `tls` holds a library for processing TLS-encoded data as described in
[RFC 5246](https://tools.ietf.org/html/rfc5246).
- `x509util` provides additional utilities for dealing with
`x509.Certificate`s.
- CT client libraries:
- The top-level `ct` package (in `.`) holds types and utilities for working
with CT data structures defined in
[RFC 6962](https://tools.ietf.org/html/rfc6962).
- `client/` and `jsonclient/` hold libraries that allow access to CT Logs
via entrypoints described in
[section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4).
- `scanner/` holds a library for scanning the entire contents of an existing
CT Log.
- Command line tools:
- `./client/ctclient` allows interaction with a CT Log
- `./scanner/scanlog` allows an existing CT Log to be scanned for certificates
of interest; please be polite when running this tool against a Log.
- `./x509util/certcheck` allows display and verification of certificates
- `./x509util/crlcheck` allows display and verification of certificate
revocation lists (CRLs).
- CT Personality for [Trillian](https://github.com/google/trillian):
- `trillian/` holds code that allows a Certificate Transparency Log to be
run using a Trillian Log as its back-end -- see
[below](#trillian-ct-personality).
## Trillian CT Personality
The `trillian/` subdirectory holds code and scripts for running a CT Log based
on the [Trillian](https://github.com/google/trillian) general transparency Log.
The main code for the CT personality is held in `trillian/ctfe`; this code
responds to HTTP requests on the
[CT API paths](https://tools.ietf.org/html/rfc6962#section-4) and translates
them to the equivalent gRPC API requests to the Trillian Log.
This obviously relies on the gRPC API definitions at
`github.com/google/trillian`; the code also uses common libraries from the
Trillian project for:
- exposing monitoring and statistics via an `interface` and corresponding
Prometheus implementation (`github.com/google/trillian/monitoring/...`)
- dealing with cryptographic keys (`github.com/google/trillian/crypto/...`).
The `trillian/integration/` directory holds scripts and tests for running the whole
system locally. In particular:
- `trillian/integration/ct_integration_test.sh` brings up local processes
running a Trillian Log server, signer and a CT personality, and exercises the
complete set of RFC 6962 API entrypoints.
- `trillian/integration/ct_hammer_test.sh` brings up a complete system and runs
a continuous randomized test of the CT entrypoints.
These scripts require a local database instance to be configured as described
in the [Trillian instructions](https://github.com/google/trillian#mysql-setup).
## Working on the Code
Developers who want to make changes to the codebase need some additional
dependencies and tools, described in the following sections. The
[Travis configuration](.travis.yml) for the codebase is also useful reference
for the required tools and scripts, as it may be more up-to-date than this
document.
### Rebuilding Generated Code
Some of the CT Go code is autogenerated from other files:
- [Protocol buffer](https://developers.google.com/protocol-buffers/) message
definitions are converted to `.pb.go` implementations.
- A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is
created with [GoMock](https://github.com/golang/mock).
Re-generating mock or protobuffer files is only needed if you're changing
the original files; if you do, you'll need to install the prerequisites:
- `mockgen` tool from https://github.com/golang/mock
- `protoc`, [Go support for protoc](https://github.com/golang/protobuf) (see
documentation linked from the
[protobuf site](https://github.com/google/protobuf))
and run the following:
```bash
go generate -x ./... # hunts for //go:generate comments and runs them
```
### Updating Vendor Code
The codebase includes a couple of external projects under the `vendor/`
subdirectory, to ensure that builds use a fixed version (typically because the
upstream repository does not guarantee back-compatibility between the tip
`master` branch and the current stable release). See
[instructions in the Trillian repo](https://github.com/google/trillian#updating-vendor-code)
for how to update vendored subtrees.
### Running Codebase Checks
The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
and tests over the codebase.
```bash
# Install gometalinter and all linters
go get -u github.com/alecthomas/gometalinter
gometalinter --install
# Run code generation, build, test and linters
./scripts/presubmit.sh
# Run build, test and linters but skip code generation
./scripts/presubmit.sh --no-generate
# Or just run the linters alone:
gometalinter --config=gometalinter.json ./...
```


@ -1,27 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"asn1.go",
"common.go",
"marshal.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/asn1",
importpath = "github.com/google/certificate-transparency-go/asn1",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

File diff suppressed because it is too large.


@ -1,177 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package asn1
import (
"reflect"
"strconv"
"strings"
)
// ASN.1 objects have metadata preceding them:
// the tag: the type of the object
// a flag denoting if this object is compound or not
// the class type: the namespace of the tag
// the length of the object, in bytes
// Here are some standard tags and classes
// ASN.1 tags represent the type of the following object.
const (
TagBoolean = 1
TagInteger = 2
TagBitString = 3
TagOctetString = 4
TagNull = 5
TagOID = 6
TagEnum = 10
TagUTF8String = 12
TagSequence = 16
TagSet = 17
TagNumericString = 18
TagPrintableString = 19
TagT61String = 20
TagIA5String = 22
TagUTCTime = 23
TagGeneralizedTime = 24
TagGeneralString = 27
)
// ASN.1 class types represent the namespace of the tag.
const (
ClassUniversal = 0
ClassApplication = 1
ClassContextSpecific = 2
ClassPrivate = 3
)
type tagAndLength struct {
class, tag, length int
isCompound bool
}
// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
// of" and "in addition to". When not specified, every primitive type has a
// default tag in the UNIVERSAL class.
//
// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
//
// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
// /additional/ tag would wrap the default tag. This explicit tag will have the
// compound flag set.
//
// (This is used in order to remove ambiguity with optional elements.)
//
// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we
// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
// tagging with tag strings on the fields of a structure.
// fieldParameters is the parsed representation of tag string from a structure field.
type fieldParameters struct {
optional bool // true iff the field is OPTIONAL
explicit bool // true iff an EXPLICIT tag is in use.
application bool // true iff an APPLICATION tag is in use.
defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
stringType int // the string tag to use when marshaling.
timeType int // the time tag to use when marshaling.
set bool // true iff this should be encoded as a SET
omitEmpty bool // true iff this should be omitted if empty when marshaling.
name string // name of field for better diagnostics
// Invariants:
// if explicit is set, tag is non-nil.
}
// Given a tag string with the format specified in the package comment,
// parseFieldParameters will parse it into a fieldParameters structure,
// ignoring unknown parts of the string.
func parseFieldParameters(str string) (ret fieldParameters) {
for _, part := range strings.Split(str, ",") {
switch {
case part == "optional":
ret.optional = true
case part == "explicit":
ret.explicit = true
if ret.tag == nil {
ret.tag = new(int)
}
case part == "generalized":
ret.timeType = TagGeneralizedTime
case part == "utc":
ret.timeType = TagUTCTime
case part == "ia5":
ret.stringType = TagIA5String
case part == "printable":
ret.stringType = TagPrintableString
case part == "numeric":
ret.stringType = TagNumericString
case part == "utf8":
ret.stringType = TagUTF8String
case strings.HasPrefix(part, "default:"):
i, err := strconv.ParseInt(part[8:], 10, 64)
if err == nil {
ret.defaultValue = new(int64)
*ret.defaultValue = i
}
case strings.HasPrefix(part, "tag:"):
i, err := strconv.Atoi(part[4:])
if err == nil {
ret.tag = new(int)
*ret.tag = i
}
case part == "set":
ret.set = true
case part == "application":
ret.application = true
if ret.tag == nil {
ret.tag = new(int)
}
case part == "omitempty":
ret.omitEmpty = true
}
}
return
}
// Given a reflected Go type, getUniversalType returns the default tag number
// and expected compound flag.
func getUniversalType(t reflect.Type) (matchAny bool, tagNumber int, isCompound, ok bool) {
switch t {
case rawValueType:
return true, -1, false, true
case objectIdentifierType:
return false, TagOID, false, true
case bitStringType:
return false, TagBitString, false, true
case timeType:
return false, TagUTCTime, false, true
case enumeratedType:
return false, TagEnum, false, true
case bigIntType:
return false, TagInteger, false, true
}
switch t.Kind() {
case reflect.Bool:
return false, TagBoolean, false, true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return false, TagInteger, false, true
case reflect.Struct:
return false, TagSequence, true, true
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
return false, TagOctetString, false, true
}
if strings.HasSuffix(t.Name(), "SET") {
return false, TagSet, true, true
}
return false, TagSequence, true, true
case reflect.String:
return false, TagPrintableString, false, true
}
return false, 0, false, false
}


@ -1,689 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package asn1
import (
"errors"
"fmt"
"math/big"
"reflect"
"time"
"unicode/utf8"
)
var (
byte00Encoder encoder = byteEncoder(0x00)
byteFFEncoder encoder = byteEncoder(0xff)
)
// encoder represents an ASN.1 element that is waiting to be marshaled.
type encoder interface {
// Len returns the number of bytes needed to marshal this element.
Len() int
// Encode encodes this element by writing Len() bytes to dst.
Encode(dst []byte)
}
type byteEncoder byte
func (c byteEncoder) Len() int {
return 1
}
func (c byteEncoder) Encode(dst []byte) {
dst[0] = byte(c)
}
type bytesEncoder []byte
func (b bytesEncoder) Len() int {
return len(b)
}
func (b bytesEncoder) Encode(dst []byte) {
if copy(dst, b) != len(b) {
panic("internal error")
}
}
type stringEncoder string
func (s stringEncoder) Len() int {
return len(s)
}
func (s stringEncoder) Encode(dst []byte) {
if copy(dst, s) != len(s) {
panic("internal error")
}
}
type multiEncoder []encoder
func (m multiEncoder) Len() int {
var size int
for _, e := range m {
size += e.Len()
}
return size
}
func (m multiEncoder) Encode(dst []byte) {
var off int
for _, e := range m {
e.Encode(dst[off:])
off += e.Len()
}
}
type taggedEncoder struct {
// scratch contains temporary space for encoding the tag and length of
// an element in order to avoid extra allocations.
scratch [8]byte
tag encoder
body encoder
}
func (t *taggedEncoder) Len() int {
return t.tag.Len() + t.body.Len()
}
func (t *taggedEncoder) Encode(dst []byte) {
t.tag.Encode(dst)
t.body.Encode(dst[t.tag.Len():])
}
type int64Encoder int64
func (i int64Encoder) Len() int {
n := 1
for i > 127 {
n++
i >>= 8
}
for i < -128 {
n++
i >>= 8
}
return n
}
func (i int64Encoder) Encode(dst []byte) {
n := i.Len()
for j := 0; j < n; j++ {
dst[j] = byte(i >> uint((n-1-j)*8))
}
}
func base128IntLength(n int64) int {
if n == 0 {
return 1
}
l := 0
for i := n; i > 0; i >>= 7 {
l++
}
return l
}
func appendBase128Int(dst []byte, n int64) []byte {
l := base128IntLength(n)
for i := l - 1; i >= 0; i-- {
o := byte(n >> uint(i*7))
o &= 0x7f
if i != 0 {
o |= 0x80
}
dst = append(dst, o)
}
return dst
}
func makeBigInt(n *big.Int, fieldName string) (encoder, error) {
if n == nil {
return nil, StructuralError{"empty integer", fieldName}
}
if n.Sign() < 0 {
// A negative number has to be converted to two's-complement
// form. So we'll invert and subtract 1. If the
// most-significant-bit isn't set then we'll need to pad the
// beginning with 0xff in order to keep the number negative.
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bytes := nMinus1.Bytes()
for i := range bytes {
bytes[i] ^= 0xff
}
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
return multiEncoder([]encoder{byteFFEncoder, bytesEncoder(bytes)}), nil
}
return bytesEncoder(bytes), nil
} else if n.Sign() == 0 {
// Zero is written as a single zero byte rather than no bytes.
return byte00Encoder, nil
} else {
bytes := n.Bytes()
if len(bytes) > 0 && bytes[0]&0x80 != 0 {
// We'll have to pad this with 0x00 in order to stop it
// looking like a negative number.
return multiEncoder([]encoder{byte00Encoder, bytesEncoder(bytes)}), nil
}
return bytesEncoder(bytes), nil
}
}
func appendLength(dst []byte, i int) []byte {
n := lengthLength(i)
for ; n > 0; n-- {
dst = append(dst, byte(i>>uint((n-1)*8)))
}
return dst
}
func lengthLength(i int) (numBytes int) {
numBytes = 1
for i > 255 {
numBytes++
i >>= 8
}
return
}
func appendTagAndLength(dst []byte, t tagAndLength) []byte {
b := uint8(t.class) << 6
if t.isCompound {
b |= 0x20
}
if t.tag >= 31 {
b |= 0x1f
dst = append(dst, b)
dst = appendBase128Int(dst, int64(t.tag))
} else {
b |= uint8(t.tag)
dst = append(dst, b)
}
if t.length >= 128 {
l := lengthLength(t.length)
dst = append(dst, 0x80|byte(l))
dst = appendLength(dst, t.length)
} else {
dst = append(dst, byte(t.length))
}
return dst
}
type bitStringEncoder BitString
func (b bitStringEncoder) Len() int {
return len(b.Bytes) + 1
}
func (b bitStringEncoder) Encode(dst []byte) {
dst[0] = byte((8 - b.BitLength%8) % 8)
if copy(dst[1:], b.Bytes) != len(b.Bytes) {
panic("internal error")
}
}
type oidEncoder []int
func (oid oidEncoder) Len() int {
l := base128IntLength(int64(oid[0]*40 + oid[1]))
for i := 2; i < len(oid); i++ {
l += base128IntLength(int64(oid[i]))
}
return l
}
func (oid oidEncoder) Encode(dst []byte) {
dst = appendBase128Int(dst[:0], int64(oid[0]*40+oid[1]))
for i := 2; i < len(oid); i++ {
dst = appendBase128Int(dst, int64(oid[i]))
}
}
func makeObjectIdentifier(oid []int, fieldName string) (e encoder, err error) {
if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
return nil, StructuralError{"invalid object identifier", fieldName}
}
return oidEncoder(oid), nil
}
func makePrintableString(s, fieldName string) (e encoder, err error) {
for i := 0; i < len(s); i++ {
// The asterisk is often used in PrintableString, even though
// it is invalid. If a PrintableString was specifically
// requested then the asterisk is permitted by this code.
// Ampersand is allowed in parsing due a handful of CA
// certificates, however when making new certificates
// it is rejected.
if !isPrintable(s[i], allowAsterisk, rejectAmpersand) {
return nil, StructuralError{"PrintableString contains invalid character", fieldName}
}
}
return stringEncoder(s), nil
}
func makeIA5String(s, fieldName string) (e encoder, err error) {
for i := 0; i < len(s); i++ {
if s[i] > 127 {
return nil, StructuralError{"IA5String contains invalid character", fieldName}
}
}
return stringEncoder(s), nil
}
func makeNumericString(s string, fieldName string) (e encoder, err error) {
for i := 0; i < len(s); i++ {
if !isNumeric(s[i]) {
return nil, StructuralError{"NumericString contains invalid character", fieldName}
}
}
return stringEncoder(s), nil
}
func makeUTF8String(s string) encoder {
return stringEncoder(s)
}
func appendTwoDigits(dst []byte, v int) []byte {
return append(dst, byte('0'+(v/10)%10), byte('0'+v%10))
}
func appendFourDigits(dst []byte, v int) []byte {
var bytes [4]byte
for i := range bytes {
bytes[3-i] = '0' + byte(v%10)
v /= 10
}
return append(dst, bytes[:]...)
}
func outsideUTCRange(t time.Time) bool {
year := t.Year()
return year < 1950 || year >= 2050
}
func makeUTCTime(t time.Time, fieldName string) (e encoder, err error) {
dst := make([]byte, 0, 18)
dst, err = appendUTCTime(dst, t, fieldName)
if err != nil {
return nil, err
}
return bytesEncoder(dst), nil
}
func makeGeneralizedTime(t time.Time, fieldName string) (e encoder, err error) {
dst := make([]byte, 0, 20)
dst, err = appendGeneralizedTime(dst, t, fieldName)
if err != nil {
return nil, err
}
return bytesEncoder(dst), nil
}
func appendUTCTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) {
year := t.Year()
switch {
case 1950 <= year && year < 2000:
dst = appendTwoDigits(dst, year-1900)
case 2000 <= year && year < 2050:
dst = appendTwoDigits(dst, year-2000)
default:
return nil, StructuralError{"cannot represent time as UTCTime", fieldName}
}
return appendTimeCommon(dst, t), nil
}
func appendGeneralizedTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) {
year := t.Year()
if year < 0 || year > 9999 {
return nil, StructuralError{"cannot represent time as GeneralizedTime", fieldName}
}
dst = appendFourDigits(dst, year)
return appendTimeCommon(dst, t), nil
}
func appendTimeCommon(dst []byte, t time.Time) []byte {
_, month, day := t.Date()
dst = appendTwoDigits(dst, int(month))
dst = appendTwoDigits(dst, day)
hour, min, sec := t.Clock()
dst = appendTwoDigits(dst, hour)
dst = appendTwoDigits(dst, min)
dst = appendTwoDigits(dst, sec)
_, offset := t.Zone()
switch {
case offset/60 == 0:
return append(dst, 'Z')
case offset > 0:
dst = append(dst, '+')
case offset < 0:
dst = append(dst, '-')
}
offsetMinutes := offset / 60
if offsetMinutes < 0 {
offsetMinutes = -offsetMinutes
}
dst = appendTwoDigits(dst, offsetMinutes/60)
dst = appendTwoDigits(dst, offsetMinutes%60)
return dst
}
func stripTagAndLength(in []byte) []byte {
_, offset, err := parseTagAndLength(in, 0, "")
if err != nil {
return in
}
return in[offset:]
}
func makeBody(value reflect.Value, params fieldParameters) (e encoder, err error) {
switch value.Type() {
case flagType:
return bytesEncoder(nil), nil
case timeType:
t := value.Interface().(time.Time)
if params.timeType == TagGeneralizedTime || outsideUTCRange(t) {
return makeGeneralizedTime(t, params.name)
}
return makeUTCTime(t, params.name)
case bitStringType:
return bitStringEncoder(value.Interface().(BitString)), nil
case objectIdentifierType:
return makeObjectIdentifier(value.Interface().(ObjectIdentifier), params.name)
case bigIntType:
return makeBigInt(value.Interface().(*big.Int), params.name)
}
switch v := value; v.Kind() {
case reflect.Bool:
if v.Bool() {
return byteFFEncoder, nil
}
return byte00Encoder, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return int64Encoder(v.Int()), nil
case reflect.Struct:
t := v.Type()
for i := 0; i < t.NumField(); i++ {
if t.Field(i).PkgPath != "" {
return nil, StructuralError{"struct contains unexported fields", t.Field(i).Name}
}
}
startingField := 0
n := t.NumField()
if n == 0 {
return bytesEncoder(nil), nil
}
// If the first element of the structure is a non-empty
// RawContents, then we don't bother serializing the rest.
if t.Field(0).Type == rawContentsType {
s := v.Field(0)
if s.Len() > 0 {
bytes := s.Bytes()
/* The RawContents will contain the tag and
* length fields but we'll also be writing
* those ourselves, so we strip them out of
* bytes */
return bytesEncoder(stripTagAndLength(bytes)), nil
}
startingField = 1
}
switch n1 := n - startingField; n1 {
case 0:
return bytesEncoder(nil), nil
case 1:
return makeField(v.Field(startingField), parseFieldParameters(t.Field(startingField).Tag.Get("asn1")))
default:
m := make([]encoder, n1)
for i := 0; i < n1; i++ {
m[i], err = makeField(v.Field(i+startingField), parseFieldParameters(t.Field(i+startingField).Tag.Get("asn1")))
if err != nil {
return nil, err
}
}
return multiEncoder(m), nil
}
case reflect.Slice:
sliceType := v.Type()
if sliceType.Elem().Kind() == reflect.Uint8 {
return bytesEncoder(v.Bytes()), nil
}
var fp fieldParameters
switch l := v.Len(); l {
case 0:
return bytesEncoder(nil), nil
case 1:
return makeField(v.Index(0), fp)
default:
m := make([]encoder, l)
for i := 0; i < l; i++ {
m[i], err = makeField(v.Index(i), fp)
if err != nil {
return nil, err
}
}
return multiEncoder(m), nil
}
case reflect.String:
switch params.stringType {
case TagIA5String:
return makeIA5String(v.String(), params.name)
case TagPrintableString:
return makePrintableString(v.String(), params.name)
case TagNumericString:
return makeNumericString(v.String(), params.name)
default:
return makeUTF8String(v.String()), nil
}
}
return nil, StructuralError{"unknown Go type", params.name}
}
func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) {
if !v.IsValid() {
return nil, fmt.Errorf("asn1: cannot marshal nil value")
}
// If the field is an interface{} then recurse into it.
if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
return makeField(v.Elem(), params)
}
if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
return bytesEncoder(nil), nil
}
if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
defaultValue := reflect.New(v.Type()).Elem()
defaultValue.SetInt(*params.defaultValue)
if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) {
return bytesEncoder(nil), nil
}
}
// If no default value is given then the zero value for the type is
// assumed to be the default value. This isn't obviously the correct
// behavior, but it's what Go has traditionally done.
if params.optional && params.defaultValue == nil {
if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
return bytesEncoder(nil), nil
}
}
if v.Type() == rawValueType {
rv := v.Interface().(RawValue)
if len(rv.FullBytes) != 0 {
return bytesEncoder(rv.FullBytes), nil
}
t := new(taggedEncoder)
t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}))
t.body = bytesEncoder(rv.Bytes)
return t, nil
}
matchAny, tag, isCompound, ok := getUniversalType(v.Type())
if !ok || matchAny {
return nil, StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type()), params.name}
}
if params.timeType != 0 && tag != TagUTCTime {
return nil, StructuralError{"explicit time type given to non-time member", params.name}
}
if params.stringType != 0 && tag != TagPrintableString {
return nil, StructuralError{"explicit string type given to non-string member", params.name}
}
switch tag {
case TagPrintableString:
if params.stringType == 0 {
// This is a string without an explicit string type. We'll use
// a PrintableString if the character set in the string is
// sufficiently limited, otherwise we'll use a UTF8String.
for _, r := range v.String() {
if r >= utf8.RuneSelf || !isPrintable(byte(r), rejectAsterisk, rejectAmpersand) {
if !utf8.ValidString(v.String()) {
return nil, errors.New("asn1: string not valid UTF-8")
}
tag = TagUTF8String
break
}
}
} else {
tag = params.stringType
}
case TagUTCTime:
if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) {
tag = TagGeneralizedTime
}
}
if params.set {
if tag != TagSequence {
return nil, StructuralError{"non sequence tagged as set", params.name}
}
tag = TagSet
}
t := new(taggedEncoder)
t.body, err = makeBody(v, params)
if err != nil {
return nil, err
}
bodyLen := t.body.Len()
class := ClassUniversal
if params.tag != nil {
if params.application {
class = ClassApplication
} else {
class = ClassContextSpecific
}
if params.explicit {
t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{ClassUniversal, tag, bodyLen, isCompound}))
tt := new(taggedEncoder)
tt.body = t
tt.tag = bytesEncoder(appendTagAndLength(tt.scratch[:0], tagAndLength{
class: class,
tag: *params.tag,
length: bodyLen + t.tag.Len(),
isCompound: true,
}))
return tt, nil
}
// implicit tag.
tag = *params.tag
}
t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{class, tag, bodyLen, isCompound}))
return t, nil
}
// Marshal returns the ASN.1 encoding of val.
//
// In addition to the struct tags recognised by Unmarshal, the following can be
// used:
//
// ia5: causes strings to be marshaled as ASN.1, IA5String values
// omitempty: causes empty slices to be skipped
// printable: causes strings to be marshaled as ASN.1, PrintableString values
// utf8: causes strings to be marshaled as ASN.1, UTF8String values
// utc: causes time.Time to be marshaled as ASN.1, UTCTime values
// generalized: causes time.Time to be marshaled as ASN.1, GeneralizedTime values
func Marshal(val interface{}) ([]byte, error) {
return MarshalWithParams(val, "")
}
// MarshalWithParams allows field parameters to be specified for the
// top-level element. The form of the params is the same as the field tags.
func MarshalWithParams(val interface{}, params string) ([]byte, error) {
e, err := makeField(reflect.ValueOf(val), parseFieldParameters(params))
if err != nil {
return nil, err
}
b := make([]byte, e.Len())
e.Encode(b)
return b, nil
}
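A rough usage sketch of `Marshal` with the struct tags listed above; the struct, field names and values are made up for illustration:
```go
package main

import (
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/asn1"
)

// subject is an illustrative struct: it encodes as a SEQUENCE of a
// PrintableString, an IA5String and an INTEGER.
type subject struct {
	CommonName string `asn1:"printable"`
	Email      string `asn1:"ia5"`
	Serial     int
}

func main() {
	der, err := asn1.Marshal(subject{CommonName: "Example CA", Email: "ca@example.com", Serial: 42})
	if err != nil {
		log.Fatalf("marshal failed: %v", err)
	}
	fmt.Printf("% x\n", der) // DER bytes of the encoded SEQUENCE
}
```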


@ -1,39 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"getentries.go",
"logclient.go",
"multilog.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/client",
importpath = "github.com/google/certificate-transparency-go/client",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes:go_default_library",
"//vendor/github.com/google/certificate-transparency-go:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/client/configpb:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/jsonclient:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/tls:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/google/certificate-transparency-go/client/configpb:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -1,17 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configpb
//go:generate protoc -I=. -I=$GOPATH/src --go_out=:. multilog.proto


@ -1,158 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: multilog.proto
package configpb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// TemporalLogConfig is a set of LogShardConfig messages, whose
// time limits should be contiguous.
type TemporalLogConfig struct {
Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TemporalLogConfig) Reset() { *m = TemporalLogConfig{} }
func (m *TemporalLogConfig) String() string { return proto.CompactTextString(m) }
func (*TemporalLogConfig) ProtoMessage() {}
func (*TemporalLogConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_multilog_3c9b797b88da6f07, []int{0}
}
func (m *TemporalLogConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TemporalLogConfig.Unmarshal(m, b)
}
func (m *TemporalLogConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TemporalLogConfig.Marshal(b, m, deterministic)
}
func (dst *TemporalLogConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_TemporalLogConfig.Merge(dst, src)
}
func (m *TemporalLogConfig) XXX_Size() int {
return xxx_messageInfo_TemporalLogConfig.Size(m)
}
func (m *TemporalLogConfig) XXX_DiscardUnknown() {
xxx_messageInfo_TemporalLogConfig.DiscardUnknown(m)
}
var xxx_messageInfo_TemporalLogConfig proto.InternalMessageInfo
func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
if m != nil {
return m.Shard
}
return nil
}
// LogShardConfig describes the acceptable date range for a single shard of a temporal
// log.
type LogShardConfig struct {
Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
// The log's public key in DER-encoded PKIX form.
PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"`
// not_after_start defines the start of the range of acceptable NotAfter
// values, inclusive.
// Leaving this unset implies no lower bound to the range.
NotAfterStart *timestamp.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"`
// not_after_limit defines the end of the range of acceptable NotAfter values,
// exclusive.
// Leaving this unset implies no upper bound to the range.
NotAfterLimit *timestamp.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LogShardConfig) Reset() { *m = LogShardConfig{} }
func (m *LogShardConfig) String() string { return proto.CompactTextString(m) }
func (*LogShardConfig) ProtoMessage() {}
func (*LogShardConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_multilog_3c9b797b88da6f07, []int{1}
}
func (m *LogShardConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LogShardConfig.Unmarshal(m, b)
}
func (m *LogShardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LogShardConfig.Marshal(b, m, deterministic)
}
func (dst *LogShardConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_LogShardConfig.Merge(dst, src)
}
func (m *LogShardConfig) XXX_Size() int {
return xxx_messageInfo_LogShardConfig.Size(m)
}
func (m *LogShardConfig) XXX_DiscardUnknown() {
xxx_messageInfo_LogShardConfig.DiscardUnknown(m)
}
var xxx_messageInfo_LogShardConfig proto.InternalMessageInfo
func (m *LogShardConfig) GetUri() string {
if m != nil {
return m.Uri
}
return ""
}
func (m *LogShardConfig) GetPublicKeyDer() []byte {
if m != nil {
return m.PublicKeyDer
}
return nil
}
func (m *LogShardConfig) GetNotAfterStart() *timestamp.Timestamp {
if m != nil {
return m.NotAfterStart
}
return nil
}
func (m *LogShardConfig) GetNotAfterLimit() *timestamp.Timestamp {
if m != nil {
return m.NotAfterLimit
}
return nil
}
func init() {
proto.RegisterType((*TemporalLogConfig)(nil), "configpb.TemporalLogConfig")
proto.RegisterType((*LogShardConfig)(nil), "configpb.LogShardConfig")
}
func init() { proto.RegisterFile("multilog.proto", fileDescriptor_multilog_3c9b797b88da6f07) }
var fileDescriptor_multilog_3c9b797b88da6f07 = []byte{
// 241 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
0x14, 0x45, 0x65, 0x02, 0x08, 0xdc, 0x12, 0xc0, 0x93, 0xd5, 0x85, 0xa8, 0x62, 0xc8, 0xe4, 0x4a,
0xe5, 0x0b, 0xa0, 0x6c, 0x64, 0x4a, 0xbb, 0x47, 0x4e, 0xeb, 0x18, 0x0b, 0x3b, 0xcf, 0x72, 0x5e,
0x86, 0xfe, 0x25, 0x9f, 0x84, 0x1c, 0x2b, 0x43, 0x37, 0xb6, 0xa7, 0x77, 0xcf, 0xb9, 0xd2, 0xa5,
0xb9, 0x1b, 0x2d, 0x1a, 0x0b, 0x5a, 0xf8, 0x00, 0x08, 0xec, 0xee, 0x08, 0x7d, 0x67, 0xb4, 0x6f,
0x57, 0x2f, 0x1a, 0x40, 0x5b, 0xb5, 0x99, 0xfe, 0xed, 0xd8, 0x6d, 0xd0, 0x38, 0x35, 0xa0, 0x74,
0x3e, 0xa1, 0xeb, 0x1d, 0x7d, 0x3e, 0x28, 0xe7, 0x21, 0x48, 0x5b, 0x81, 0xde, 0x4d, 0x1e, 0x13,
0xf4, 0x66, 0xf8, 0x96, 0xe1, 0xc4, 0x49, 0x91, 0x95, 0x8b, 0x2d, 0x17, 0x73, 0x9f, 0xa8, 0x40,
0xef, 0x63, 0x92, 0xc0, 0x3a, 0x61, 0xeb, 0x5f, 0x42, 0xf3, 0xcb, 0x84, 0x3d, 0xd1, 0x6c, 0x0c,
0x86, 0x93, 0x82, 0x94, 0xf7, 0x75, 0x3c, 0xd9, 0x2b, 0xcd, 0xfd, 0xd8, 0x5a, 0x73, 0x6c, 0x7e,
0xd4, 0xb9, 0x39, 0xa9, 0xc0, 0xaf, 0x0a, 0x52, 0x2e, 0xeb, 0x65, 0xfa, 0x7e, 0xa9, 0xf3, 0xa7,
0x0a, 0xec, 0x83, 0x3e, 0xf6, 0x80, 0x8d, 0xec, 0x50, 0x85, 0x66, 0x40, 0x19, 0x90, 0x67, 0x05,
0x29, 0x17, 0xdb, 0x95, 0x48, 0x53, 0xc4, 0x3c, 0x45, 0x1c, 0xe6, 0x29, 0xf5, 0x43, 0x0f, 0xf8,
0x1e, 0x8d, 0x7d, 0x14, 0x2e, 0x3b, 0xac, 0x71, 0x06, 0xf9, 0xf5, 0xff, 0x3b, 0xaa, 0x28, 0xb4,
0xb7, 0x13, 0xf2, 0xf6, 0x17, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xd9, 0x50, 0x5b, 0x5b, 0x01, 0x00,
0x00,
}


@ -1,43 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package configpb;
import "google/protobuf/timestamp.proto";
// TemporalLogConfig is a set of LogShardConfig messages, whose
// time limits should be contiguous.
message TemporalLogConfig {
repeated LogShardConfig shard = 1;
}
// LogShardConfig describes the acceptable date range for a single shard of a temporal
// log.
message LogShardConfig {
string uri = 1;
// The log's public key in DER-encoded PKIX form.
bytes public_key_der = 2;
// not_after_start defines the start of the range of acceptable NotAfter
// values, inclusive.
// Leaving this unset implies no lower bound to the range.
google.protobuf.Timestamp not_after_start = 3;
// not_after_limit defines the end of the range of acceptable NotAfter values,
// exclusive.
// Leaving this unset implies no upper bound to the range.
google.protobuf.Timestamp not_after_limit = 4;
}


@ -1,75 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"errors"
"strconv"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/x509"
)
// GetRawEntries exposes the /ct/v1/get-entries result with only the JSON parsing done.
func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.GetEntriesResponse, error) {
if end < 0 {
return nil, errors.New("end should be >= 0")
}
if end < start {
return nil, errors.New("start should be <= end")
}
params := map[string]string{
"start": strconv.FormatInt(start, 10),
"end": strconv.FormatInt(end, 10),
}
if ctx == nil {
ctx = context.TODO()
}
var resp ct.GetEntriesResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
return &resp, nil
}
// GetEntries attempts to retrieve the entries in the sequence [start, end] from the CT log server
// (RFC6962 s4.6) as parsed [pre-]certificates for convenience, held in a slice of ct.LogEntry structures.
// However, this does mean that any certificate parsing failures will cause a failure of the whole
// retrieval operation; for more robust retrieval of parsed certificates, use GetRawEntries() and invoke
// ct.LogEntryFromLeaf() on each individual entry.
func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogEntry, error) {
resp, err := c.GetRawEntries(ctx, start, end)
if err != nil {
return nil, err
}
entries := make([]ct.LogEntry, len(resp.Entries))
for i, entry := range resp.Entries {
index := start + int64(i)
logEntry, err := ct.LogEntryFromLeaf(index, &entry)
if x509.IsFatal(err) {
return nil, err
}
entries[i] = *logEntry
}
return entries, nil
}
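A rough sketch of the more robust retrieval pattern recommended above, skipping only entries whose certificates fail fatally to parse; the log URL and index range are illustrative:
```go
package main

import (
	"context"
	"log"
	"net/http"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	// Hypothetical log URL and index range.
	lc, err := client.New("https://ct.example.com/testlog", &http.Client{}, jsonclient.Options{})
	if err != nil {
		log.Fatalf("failed to create log client: %v", err)
	}
	start, end := int64(0), int64(31)
	resp, err := lc.GetRawEntries(context.Background(), start, end)
	if err != nil {
		log.Fatalf("get-entries failed: %v", err)
	}
	parsed := 0
	for i := range resp.Entries {
		index := start + int64(i)
		entry, err := ct.LogEntryFromLeaf(index, &resp.Entries[i])
		if x509.IsFatal(err) {
			// Skip only structural parse failures; non-fatal errors still
			// yield a usable LogEntry.
			log.Printf("entry %d: fatal parse error: %v", index, err)
			continue
		}
		_ = entry // inspect entry.Leaf, entry.X509Cert, etc. as needed
		parsed++
	}
	log.Printf("parsed %d of %d entries", parsed, len(resp.Entries))
}
```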


@ -1,289 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package client is a CT log client implementation and contains types and code
// for interacting with RFC6962-compliant CT Log instances.
// See http://tools.ietf.org/html/rfc6962 for details
package client
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"strconv"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/jsonclient"
"github.com/google/certificate-transparency-go/tls"
)
// LogClient represents a client for a given CT Log instance
type LogClient struct {
jsonclient.JSONClient
}
// CheckLogClient is an interface that allows (just) checking of various log contents.
type CheckLogClient interface {
BaseURI() string
GetSTH(context.Context) (*ct.SignedTreeHead, error)
GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error)
GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error)
}
// New constructs a new LogClient instance.
// |uri| is the base URI of the CT log instance to interact with, e.g.
// https://ct.googleapis.com/pilot
// |hc| is the underlying client to be used for HTTP requests to the CT log.
// |opts| can be used to provide a custom logger interface and a public key
// for signature verification.
func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) {
logClient, err := jsonclient.New(uri, hc, opts)
if err != nil {
return nil, err
}
return &LogClient{*logClient}, err
}
// RspError represents an error that occurred when processing a response from a server,
// and also includes key details from the http.Response that triggered the error.
type RspError struct {
Err error
StatusCode int
Body []byte
}
// Error formats the RspError instance, focusing on the error.
func (e RspError) Error() string {
return e.Err.Error()
}
// Attempts to add |chain| to the log, using the API endpoint specified by
// |path|. If the provided context expires before submission is complete, an
// error will be returned.
func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
var resp ct.AddChainResponse
var req ct.AddChainRequest
for _, link := range chain {
req.Chain = append(req.Chain, link.Data)
}
httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
var ds ct.DigitallySigned
if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
} else if len(rest) > 0 {
return nil, RspError{
Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
StatusCode: httpRsp.StatusCode,
Body: body,
}
}
exts, err := base64.StdEncoding.DecodeString(resp.Extensions)
if err != nil {
return nil, RspError{
Err: fmt.Errorf("invalid base64 data in Extensions (%q): %v", resp.Extensions, err),
StatusCode: httpRsp.StatusCode,
Body: body,
}
}
var logID ct.LogID
copy(logID.KeyID[:], resp.ID)
sct := &ct.SignedCertificateTimestamp{
SCTVersion: resp.SCTVersion,
LogID: logID,
Timestamp: resp.Timestamp,
Extensions: ct.CTExtensions(exts),
Signature: ds,
}
if err := c.VerifySCTSignature(*sct, ctype, chain); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return sct, nil
}
// AddChain adds the (DER represented) X509 |chain| to the log.
func (c *LogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
return c.addChainWithRetry(ctx, ct.X509LogEntryType, ct.AddChainPath, chain)
}
// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain)
}
// AddJSON submits arbitrary data to the XJSON server.
func (c *LogClient) AddJSON(ctx context.Context, data interface{}) (*ct.SignedCertificateTimestamp, error) {
req := ct.AddJSONRequest{Data: data}
var resp ct.AddChainResponse
httpRsp, body, err := c.PostAndParse(ctx, ct.AddJSONPath, &req, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
var ds ct.DigitallySigned
if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
} else if len(rest) > 0 {
return nil, RspError{
Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
StatusCode: httpRsp.StatusCode,
Body: body,
}
}
var logID ct.LogID
copy(logID.KeyID[:], resp.ID)
return &ct.SignedCertificateTimestamp{
SCTVersion: resp.SCTVersion,
LogID: logID,
Timestamp: resp.Timestamp,
Extensions: ct.CTExtensions(resp.Extensions),
Signature: ds,
}, nil
}
// GetSTH retrieves the current STH from the log.
// Returns a populated SignedTreeHead, or a non-nil error (which may be of type
// RspError if a raw http.Response is available).
func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) {
var resp ct.GetSTHResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
sth, err := resp.ToSignedTreeHead()
if err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
if err := c.VerifySTHSignature(*sth); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return sth, nil
}
// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is
// successful.
func (c *LogClient) VerifySTHSignature(sth ct.SignedTreeHead) error {
if c.Verifier == nil {
// Can't verify signatures without a verifier
return nil
}
return c.Verifier.VerifySTHSignature(sth)
}
// VerifySCTSignature checks the signature in sct for the given LogEntryType, with associated certificate chain.
func (c *LogClient) VerifySCTSignature(sct ct.SignedCertificateTimestamp, ctype ct.LogEntryType, certData []ct.ASN1Cert) error {
if c.Verifier == nil {
// Can't verify signatures without a verifier
return nil
}
leaf, err := ct.MerkleTreeLeafFromRawChain(certData, ctype, sct.Timestamp)
if err != nil {
return fmt.Errorf("failed to build MerkleTreeLeaf: %v", err)
}
entry := ct.LogEntry{Leaf: *leaf}
return c.Verifier.VerifySCTSignature(sct, entry)
}
// GetSTHConsistency retrieves the consistency proof between two snapshots.
func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) {
base10 := 10
params := map[string]string{
"first": strconv.FormatUint(first, base10),
"second": strconv.FormatUint(second, base10),
}
var resp ct.GetSTHConsistencyResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
return resp.Consistency, nil
}
// GetProofByHash returns an audit path for the hash of an SCT.
func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) {
b64Hash := base64.StdEncoding.EncodeToString(hash)
base10 := 10
params := map[string]string{
"tree_size": strconv.FormatUint(treeSize, base10),
"hash": b64Hash,
}
var resp ct.GetProofByHashResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
return &resp, nil
}
// GetAcceptedRoots retrieves the set of acceptable root certificates for a log.
func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) {
var resp ct.GetRootsResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
var roots []ct.ASN1Cert
for _, cert64 := range resp.Certificates {
cert, err := base64.StdEncoding.DecodeString(cert64)
if err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
roots = append(roots, ct.ASN1Cert{Data: cert})
}
return roots, nil
}
// GetEntryAndProof returns a log entry and audit path for the index of a leaf.
func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) {
base10 := 10
params := map[string]string{
"leaf_index": strconv.FormatUint(index, base10),
"tree_size": strconv.FormatUint(treeSize, base10),
}
var resp ct.GetEntryAndProofResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
return &resp, nil
}


@ -1,221 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/client/configpb"
"github.com/google/certificate-transparency-go/jsonclient"
"github.com/google/certificate-transparency-go/x509"
)
type interval struct {
lower *time.Time // nil => no lower bound
upper *time.Time // nil => no upper bound
}
// TemporalLogConfigFromFile creates a TemporalLogConfig object from the given
// filename, which should contain text-protobuf encoded configuration data.
func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, error) {
if len(filename) == 0 {
return nil, errors.New("log config filename empty")
}
cfgText, err := ioutil.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("failed to read log config: %v", err)
}
var cfg configpb.TemporalLogConfig
if err := proto.UnmarshalText(string(cfgText), &cfg); err != nil {
return nil, fmt.Errorf("failed to parse log config: %v", err)
}
if len(cfg.Shard) == 0 {
return nil, errors.New("empty log config found")
}
return &cfg, nil
}
// AddLogClient is an interface that allows adding certificates and pre-certificates to a log.
// Both LogClient and TemporalLogClient implement this interface, which allows users to
// commonize code for adding certs to normal/temporal logs.
type AddLogClient interface {
AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error)
AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error)
GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
}
// TemporalLogClient allows [pre-]certificates to be uploaded to a temporal log.
type TemporalLogClient struct {
Clients []*LogClient
intervals []interval
}
// NewTemporalLogClient builds a new client for interacting with a temporal log.
// The provided config should be contiguous and chronological.
func NewTemporalLogClient(cfg configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) {
if len(cfg.Shard) == 0 {
return nil, errors.New("empty config")
}
overall, err := shardInterval(cfg.Shard[0])
if err != nil {
return nil, fmt.Errorf("cfg.Shard[0] invalid: %v", err)
}
intervals := make([]interval, 0, len(cfg.Shard))
intervals = append(intervals, overall)
for i := 1; i < len(cfg.Shard); i++ {
interval, err := shardInterval(cfg.Shard[i])
if err != nil {
return nil, fmt.Errorf("cfg.Shard[%d] invalid: %v", i, err)
}
if overall.upper == nil {
return nil, fmt.Errorf("cfg.Shard[%d] extends an interval with no upper bound", i)
}
if interval.lower == nil {
return nil, fmt.Errorf("cfg.Shard[%d] has no lower bound but extends an interval", i)
}
if !interval.lower.Equal(*overall.upper) {
return nil, fmt.Errorf("cfg.Shard[%d] starts at %v but previous interval ended at %v", i, interval.lower, overall.upper)
}
overall.upper = interval.upper
intervals = append(intervals, interval)
}
clients := make([]*LogClient, 0, len(cfg.Shard))
for i, shard := range cfg.Shard {
opts := jsonclient.Options{}
opts.PublicKeyDER = shard.GetPublicKeyDer()
c, err := New(shard.Uri, hc, opts)
if err != nil {
return nil, fmt.Errorf("failed to create client for cfg.Shard[%d]: %v", i, err)
}
clients = append(clients, c)
}
tlc := TemporalLogClient{
Clients: clients,
intervals: intervals,
}
return &tlc, nil
}
// GetAcceptedRoots retrieves the set of acceptable root certificates for all
// of the shards of a temporal log (i.e. the union).
func (tlc *TemporalLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) {
type result struct {
roots []ct.ASN1Cert
err error
}
results := make(chan result, len(tlc.Clients))
for _, c := range tlc.Clients {
go func(c *LogClient) {
var r result
r.roots, r.err = c.GetAcceptedRoots(ctx)
results <- r
}(c)
}
var allRoots []ct.ASN1Cert
seen := make(map[[sha256.Size]byte]bool)
for range tlc.Clients {
r := <-results
if r.err != nil {
return nil, r.err
}
for _, root := range r.roots {
h := sha256.Sum256(root.Data)
if seen[h] {
continue
}
seen[h] = true
allRoots = append(allRoots, root)
}
}
return allRoots, nil
}
// AddChain adds the (DER represented) X509 chain to the appropriate log.
func (tlc *TemporalLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
return tlc.addChain(ctx, ct.X509LogEntryType, ct.AddChainPath, chain)
}
// AddPreChain adds the (DER represented) Precertificate chain to the appropriate log.
func (tlc *TemporalLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
return tlc.addChain(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain)
}
func (tlc *TemporalLogClient) addChain(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
// Parse the first entry in the chain
if len(chain) == 0 {
return nil, errors.New("missing chain")
}
cert, err := x509.ParseCertificate(chain[0].Data)
if err != nil {
return nil, fmt.Errorf("failed to parse initial chain entry: %v", err)
}
cidx, err := tlc.IndexByDate(cert.NotAfter)
if err != nil {
return nil, fmt.Errorf("failed to find log to process cert: %v", err)
}
return tlc.Clients[cidx].addChainWithRetry(ctx, ctype, path, chain)
}
// IndexByDate returns the index of the Clients entry that is appropriate for the given
// date.
func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) {
for i, interval := range tlc.intervals {
if (interval.lower != nil) && when.Before(*interval.lower) {
continue
}
if (interval.upper != nil) && !when.Before(*interval.upper) {
continue
}
return i, nil
}
return -1, fmt.Errorf("no log found encompassing date %v", when)
}
func shardInterval(cfg *configpb.LogShardConfig) (interval, error) {
var interval interval
if cfg.NotAfterStart != nil {
t, err := ptypes.Timestamp(cfg.NotAfterStart)
if err != nil {
return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err)
}
interval.lower = &t
}
if cfg.NotAfterLimit != nil {
t, err := ptypes.Timestamp(cfg.NotAfterLimit)
if err != nil {
return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err)
}
interval.upper = &t
}
if interval.lower != nil && interval.upper != nil && !(*interval.lower).Before(*interval.upper) {
return interval, errors.New("inverted interval")
}
return interval, nil
}
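A minimal sketch of wiring the temporal client together: load a text-proto shard configuration and submit a chain, letting the client pick the shard that covers the certificate's NotAfter date; the file name and chain contents are placeholders:
```go
package main

import (
	"context"
	"log"
	"net/http"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/client"
)

func main() {
	// Placeholder path to a text-protobuf TemporalLogConfig.
	cfg, err := client.TemporalLogConfigFromFile("temporal_log_config.textproto")
	if err != nil {
		log.Fatalf("failed to load shard config: %v", err)
	}
	tlc, err := client.NewTemporalLogClient(*cfg, &http.Client{})
	if err != nil {
		log.Fatalf("failed to build temporal log client: %v", err)
	}
	// chain[0] must be the end-entity certificate in DER form; the client
	// parses its NotAfter date to choose the matching shard.
	var chain []ct.ASN1Cert // placeholder: fill with DER-encoded certificates
	sct, err := tlc.AddChain(context.Background(), chain)
	if err != nil {
		log.Fatalf("add-chain failed: %v", err)
	}
	log.Printf("got SCT with timestamp %d", sct.Timestamp)
}
```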


@ -1,10 +0,0 @@
steps:
- id: build_ctfe
name: gcr.io/cloud-builders/docker
args:
- build
- --file=trillian/examples/deployment/docker/ctfe/Dockerfile
- --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
- .
images:
- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}


@ -1,28 +0,0 @@
{
"Deadline": "60s",
"Linters": {
"license": "./scripts/check_license.sh:PATH:LINE:MESSAGE",
"forked": "./scripts/check_forked.sh:PATH:LINE:MESSAGE",
"unforked": "./scripts/check_unforked.sh:PATH:LINE:MESSAGE"
},
"Enable": [
"forked",
"gocyclo",
"gofmt",
"goimports",
"golint",
"license",
"misspell",
"unforked",
"vet"
],
"Exclude": [
"x509/",
"asn1/",
".+\\.pb\\.go",
".+\\.pb\\.gw\\.go",
"mock_.+\\.go"
],
"Cyclo": 40,
"Vendor": true
}


@ -1,31 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"backoff.go",
"client.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/jsonclient",
importpath = "github.com/google/certificate-transparency-go/jsonclient",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/google/certificate-transparency-go:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509:go_default_library",
"//vendor/golang.org/x/net/context/ctxhttp:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -1,72 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonclient
import (
"sync"
"time"
)
type backoff struct {
mu sync.RWMutex
multiplier uint
notBefore time.Time
}
const (
// maximum backoff is 2^(maxMultiplier-1) = 128 seconds
maxMultiplier = 8
)
func (b *backoff) set(override *time.Duration) time.Duration {
b.mu.Lock()
defer b.mu.Unlock()
if b.notBefore.After(time.Now()) {
if override != nil {
// If existing backoff is set but override would be longer than
// it then set it to that.
notBefore := time.Now().Add(*override)
if notBefore.After(b.notBefore) {
b.notBefore = notBefore
}
}
return time.Until(b.notBefore)
}
var wait time.Duration
if override != nil {
wait = *override
} else {
if b.multiplier < maxMultiplier {
b.multiplier++
}
wait = time.Second * time.Duration(1<<(b.multiplier-1))
}
b.notBefore = time.Now().Add(wait)
return wait
}
func (b *backoff) decreaseMultiplier() {
b.mu.Lock()
defer b.mu.Unlock()
if b.multiplier > 0 {
b.multiplier--
}
}
func (b *backoff) until() time.Time {
b.mu.RLock()
defer b.mu.RUnlock()
return b.notBefore
}


@ -1,294 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonclient
import (
"bytes"
"context"
"crypto"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"strconv"
"strings"
"time"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/x509"
"golang.org/x/net/context/ctxhttp"
)
const maxJitter = 250 * time.Millisecond
type backoffer interface {
// set adjusts/increases the current backoff interval (typically on retryable failure);
// if the optional parameter is provided, this will be used as the interval if it is greater
// than the currently set interval. Returns the current wait period so that it can be
// logged along with any error message.
set(*time.Duration) time.Duration
// decreaseMultiplier reduces the current backoff multiplier, typically on success.
decreaseMultiplier()
// until returns the time until which the client should wait before making a request,
// it may be in the past in which case it should be ignored.
until() time.Time
}
// JSONClient provides common functionality for interacting with a JSON server
// that uses cryptographic signatures.
type JSONClient struct {
uri string // the base URI of the server, e.g. https://ct.googleapis.com/pilot
httpClient *http.Client // used to interact with the server via HTTP
Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available)
logger Logger // interface to use for logging warnings and errors
backoff backoffer // object used to store and calculate backoff information
}
// Logger is a simple logging interface used to log internal errors and warnings
type Logger interface {
// Printf formats and logs a message
Printf(string, ...interface{})
}
// Options are the options for creating a new JSONClient.
type Options struct {
// Interface to use for logging warnings and errors, if nil the
// standard library log package will be used.
Logger Logger
// PEM format public key to use for signature verification.
PublicKey string
// DER format public key to use for signature verification.
PublicKeyDER []byte
}
// ParsePublicKey parses and returns the public key contained in opts.
// If both opts.PublicKey and opts.PublicKeyDER are set, PublicKeyDER is used.
// If neither is set, nil will be returned.
func (opts *Options) ParsePublicKey() (crypto.PublicKey, error) {
if len(opts.PublicKeyDER) > 0 {
return x509.ParsePKIXPublicKey(opts.PublicKeyDER)
}
if opts.PublicKey != "" {
pubkey, _ /* keyhash */, rest, err := ct.PublicKeyFromPEM([]byte(opts.PublicKey))
if err != nil {
return nil, err
}
if len(rest) > 0 {
return nil, errors.New("extra data found after PEM key decoded")
}
return pubkey, nil
}
return nil, nil
}
type basicLogger struct{}
func (bl *basicLogger) Printf(msg string, args ...interface{}) {
log.Printf(msg, args...)
}
// New constructs a new JSONClient instance, for the given base URI, using the
// given http.Client object (if provided) and the Options object.
// If opts does not specify a public key, signatures will not be verified.
func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) {
pubkey, err := opts.ParsePublicKey()
if err != nil {
return nil, fmt.Errorf("invalid public key: %v", err)
}
var verifier *ct.SignatureVerifier
if pubkey != nil {
var err error
verifier, err = ct.NewSignatureVerifier(pubkey)
if err != nil {
return nil, err
}
}
if hc == nil {
hc = new(http.Client)
}
logger := opts.Logger
if logger == nil {
logger = &basicLogger{}
}
return &JSONClient{
uri: strings.TrimRight(uri, "/"),
httpClient: hc,
Verifier: verifier,
logger: logger,
backoff: &backoff{},
}, nil
}
// BaseURI returns the base URI that the JSONClient makes queries to.
func (c *JSONClient) BaseURI() string {
return c.uri
}
// GetAndParse makes an HTTP GET call to the given path, and attempts to parse
// the response as a JSON representation of the rsp structure. Returns the
// http.Response, the body of the response, and an error. Note that the
// returned http.Response can be non-nil even when an error is returned,
// in particular when the HTTP status is not OK or when the JSON parsing fails.
func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil {
return nil, nil, errors.New("context.Context required")
}
// Build a GET request with URL-encoded parameters.
vals := url.Values{}
for k, v := range params {
vals.Add(k, v)
}
fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode())
httpReq, err := http.NewRequest(http.MethodGet, fullURI, nil)
if err != nil {
return nil, nil, err
}
httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
if err != nil {
return nil, nil, err
}
// Read everything now so http.Client can reuse the connection.
body, err := ioutil.ReadAll(httpRsp.Body)
httpRsp.Body.Close()
if err != nil {
return httpRsp, body, fmt.Errorf("failed to read response body: %v", err)
}
if httpRsp.StatusCode != http.StatusOK {
return httpRsp, body, fmt.Errorf("got HTTP Status %q", httpRsp.Status)
}
if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil {
return httpRsp, body, err
}
return httpRsp, body, nil
}
// PostAndParse makes an HTTP POST call to the given path, including the request
// parameters, and attempts to parse the response as a JSON representation of
// the rsp structure. Returns the http.Response, the body of the response, and
// an error. Note that the returned http.Response can be non-nil even when an
// error is returned, in particular when the HTTP status is not OK or when the
// JSON parsing fails.
func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil {
return nil, nil, errors.New("context.Context required")
}
// Build a POST request with JSON body.
postBody, err := json.Marshal(req)
if err != nil {
return nil, nil, err
}
fullURI := fmt.Sprintf("%s%s", c.uri, path)
httpReq, err := http.NewRequest(http.MethodPost, fullURI, bytes.NewReader(postBody))
if err != nil {
return nil, nil, err
}
httpReq.Header.Set("Content-Type", "application/json")
httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
// Read all of the body, if there is one, so that the http.Client can do Keep-Alive.
var body []byte
if httpRsp != nil {
body, err = ioutil.ReadAll(httpRsp.Body)
httpRsp.Body.Close()
}
if err != nil {
return httpRsp, body, err
}
if httpRsp.StatusCode == http.StatusOK {
if err = json.Unmarshal(body, &rsp); err != nil {
return httpRsp, body, err
}
}
return httpRsp, body, nil
}
// waitForBackoff blocks until the defined backoff interval has elapsed or the context has expired;
// if the not-before time is already in the past it returns immediately.
func (c *JSONClient) waitForBackoff(ctx context.Context) error {
dur := time.Until(c.backoff.until().Add(time.Millisecond * time.Duration(rand.Intn(int(maxJitter.Seconds()*1000)))))
if dur < 0 {
dur = 0
}
backoffTimer := time.NewTimer(dur)
select {
case <-ctx.Done():
return ctx.Err()
case <-backoffTimer.C:
}
return nil
}
// PostAndParseWithRetry makes a HTTP POST call, but retries (with backoff) on
// retriable errors; the caller should set a deadline on the provided context
// to prevent infinite retries. Return values are as for PostAndParse.
func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil {
return nil, nil, errors.New("context.Context required")
}
for {
httpRsp, body, err := c.PostAndParse(ctx, path, req, rsp)
if err != nil {
// Don't retry context errors.
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
wait := c.backoff.set(nil)
c.logger.Printf("Request failed, backing-off for %s: %s", wait, err)
} else {
switch {
case httpRsp.StatusCode == http.StatusOK:
return httpRsp, body, nil
case httpRsp.StatusCode == http.StatusRequestTimeout:
// Request timeout, retry immediately
c.logger.Printf("Request timed out, retrying immediately")
case httpRsp.StatusCode == http.StatusServiceUnavailable:
var backoff *time.Duration
// Retry-After may be either a number of seconds as an int or an RFC 1123
// date string (RFC 7231 Section 7.1.3)
if retryAfter := httpRsp.Header.Get("Retry-After"); retryAfter != "" {
if seconds, err := strconv.Atoi(retryAfter); err == nil {
b := time.Duration(seconds) * time.Second
backoff = &b
} else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil {
b := date.Sub(time.Now())
backoff = &b
}
}
wait := c.backoff.set(backoff)
c.logger.Printf("Request failed, backing-off for %s: got HTTP status %s", wait, httpRsp.Status)
default:
return httpRsp, body, fmt.Errorf("got HTTP Status %q", httpRsp.Status)
}
}
if err := c.waitForBackoff(ctx); err != nil {
return nil, nil, err
}
}
}
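
A minimal sketch of how a caller would drive the deleted JSONClient against a CT log (the log URL is a placeholder, and the /ct/v1/get-sth path and response fields follow RFC 6962 section 4.3; decoding into a local struct avoids depending on types outside this diff):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	// No public key in Options, so the client performs no signature verification.
	client, err := jsonclient.New("https://ct.example.com/log", nil, jsonclient.Options{})
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// get-sth returns a JSON object; GetAndParse decodes it into sth.
	var sth struct {
		TreeSize  uint64 `json:"tree_size"`
		Timestamp uint64 `json:"timestamp"`
	}
	if _, _, err := client.GetAndParse(ctx, "/ct/v1/get-sth", nil, &sth); err != nil {
		log.Fatal(err)
	}
	fmt.Println("tree size:", sth.TreeSize)
}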


@ -1,347 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ct
import (
"crypto"
"crypto/sha256"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509"
)
// SerializeSCTSignatureInput serializes the passed in sct and log entry into
// the correct format for signing.
func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
switch sct.SCTVersion {
case V1:
input := CertificateTimestamp{
SCTVersion: sct.SCTVersion,
SignatureType: CertificateTimestampSignatureType,
Timestamp: sct.Timestamp,
EntryType: entry.Leaf.TimestampedEntry.EntryType,
Extensions: sct.Extensions,
}
switch entry.Leaf.TimestampedEntry.EntryType {
case X509LogEntryType:
input.X509Entry = entry.Leaf.TimestampedEntry.X509Entry
case PrecertLogEntryType:
input.PrecertEntry = &PreCert{
IssuerKeyHash: entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
TBSCertificate: entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
}
case XJSONLogEntryType:
input.JSONEntry = entry.Leaf.TimestampedEntry.JSONEntry
default:
return nil, fmt.Errorf("unsupported entry type %s", entry.Leaf.TimestampedEntry.EntryType)
}
return tls.Marshal(input)
default:
return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
}
}
// SerializeSTHSignatureInput serializes the passed in STH into the correct
// format for signing.
func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
switch sth.Version {
case V1:
if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
}
input := TreeHeadSignature{
Version: sth.Version,
SignatureType: TreeHashSignatureType,
Timestamp: sth.Timestamp,
TreeSize: sth.TreeSize,
SHA256RootHash: sth.SHA256RootHash,
}
return tls.Marshal(input)
default:
return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
}
}
// CreateX509MerkleTreeLeaf generates a MerkleTreeLeaf for an X509 cert
func CreateX509MerkleTreeLeaf(cert ASN1Cert, timestamp uint64) *MerkleTreeLeaf {
return &MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
Timestamp: timestamp,
EntryType: X509LogEntryType,
X509Entry: &cert,
},
}
}
// CreateJSONMerkleTreeLeaf creates the merkle tree leaf for json data.
func CreateJSONMerkleTreeLeaf(data interface{}, timestamp uint64) *MerkleTreeLeaf {
jsonData, err := json.Marshal(AddJSONRequest{Data: data})
if err != nil {
return nil
}
// Match the JSON serialization implemented by json-c
jsonStr := strings.Replace(string(jsonData), ":", ": ", -1)
jsonStr = strings.Replace(jsonStr, ",", ", ", -1)
jsonStr = strings.Replace(jsonStr, "{", "{ ", -1)
jsonStr = strings.Replace(jsonStr, "}", " }", -1)
jsonStr = strings.Replace(jsonStr, "/", `\/`, -1)
// TODO: Pending google/certificate-transparency#1243, replace with
// ObjectHash once supported by CT server.
return &MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
Timestamp: timestamp,
EntryType: XJSONLogEntryType,
JSONEntry: &JSONDataEntry{Data: []byte(jsonStr)},
},
}
}
// MerkleTreeLeafFromRawChain generates a MerkleTreeLeaf from a chain (in DER-encoded form) and timestamp.
func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
// Need at most 3 of the chain
count := 3
if count > len(rawChain) {
count = len(rawChain)
}
chain := make([]*x509.Certificate, count)
for i := range chain {
cert, err := x509.ParseCertificate(rawChain[i].Data)
if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err)
}
chain[i] = cert
}
return MerkleTreeLeafFromChain(chain, etype, timestamp)
}
// MerkleTreeLeafFromChain generates a MerkleTreeLeaf from a chain and timestamp.
func MerkleTreeLeafFromChain(chain []*x509.Certificate, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
leaf := MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
EntryType: etype,
Timestamp: timestamp,
},
}
if etype == X509LogEntryType {
leaf.TimestampedEntry.X509Entry = &ASN1Cert{Data: chain[0].Raw}
return &leaf, nil
}
if etype != PrecertLogEntryType {
return nil, fmt.Errorf("unknown LogEntryType %d", etype)
}
// Pre-certs are more complicated. First, parse the leaf pre-cert and its
// putative issuer.
if len(chain) < 2 {
return nil, fmt.Errorf("no issuer cert available for precert leaf building")
}
issuer := chain[1]
cert := chain[0]
var preIssuer *x509.Certificate
if IsPreIssuer(issuer) {
// Replace the cert's issuance information with details from the pre-issuer.
preIssuer = issuer
// The issuer of the pre-cert is not going to be the issuer of the final
// cert. Change to use the final issuer's key hash.
if len(chain) < 3 {
return nil, fmt.Errorf("no issuer cert available for pre-issuer")
}
issuer = chain[2]
}
// Next, post-process the DER-encoded TBSCertificate, to remove the CT poison
// extension and possibly update the issuer field.
defangedTBS, err := x509.BuildPrecertTBS(cert.RawTBSCertificate, preIssuer)
if err != nil {
return nil, fmt.Errorf("failed to remove poison extension: %v", err)
}
leaf.TimestampedEntry.EntryType = PrecertLogEntryType
leaf.TimestampedEntry.PrecertEntry = &PreCert{
IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
TBSCertificate: defangedTBS,
}
return &leaf, nil
}
// MerkleTreeLeafForEmbeddedSCT generates a MerkleTreeLeaf from a chain and an
// SCT timestamp, where the leaf certificate at chain[0] is a certificate that
// contains embedded SCTs. It is assumed that the timestamp provided is from
// one of the SCTs embedded within the leaf certificate.
func MerkleTreeLeafForEmbeddedSCT(chain []*x509.Certificate, timestamp uint64) (*MerkleTreeLeaf, error) {
// For building the leaf for a certificate and SCT where the SCT is embedded
// in the certificate, we need to build the original precertificate TBS
// data. First, parse the leaf cert and its issuer.
if len(chain) < 2 {
return nil, fmt.Errorf("no issuer cert available for precert leaf building")
}
issuer := chain[1]
cert := chain[0]
// Next, post-process the DER-encoded TBSCertificate, to remove the SCTList
// extension.
tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate)
if err != nil {
return nil, fmt.Errorf("failed to remove SCT List extension: %v", err)
}
return &MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
EntryType: PrecertLogEntryType,
Timestamp: timestamp,
PrecertEntry: &PreCert{
IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
TBSCertificate: tbs,
},
},
}, nil
}
// LeafHashForLeaf returns the leaf hash for a Merkle tree leaf.
func LeafHashForLeaf(leaf *MerkleTreeLeaf) ([sha256.Size]byte, error) {
leafData, err := tls.Marshal(*leaf)
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("failed to tls-encode MerkleTreeLeaf: %s", err)
}
data := append([]byte{TreeLeafPrefix}, leafData...)
leafHash := sha256.Sum256(data)
return leafHash, nil
}
// IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific
// certificate transparency extended key usage.
func IsPreIssuer(issuer *x509.Certificate) bool {
for _, eku := range issuer.ExtKeyUsage {
if eku == x509.ExtKeyUsageCertificateTransparency {
return true
}
}
return false
}
// RawLogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
// after JSON parsing) into a RawLogEntry object (i.e. a TLS-parsed structure).
func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) {
ret := RawLogEntry{Index: index}
if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil {
return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest))
}
switch eType := ret.Leaf.TimestampedEntry.EntryType; eType {
case X509LogEntryType:
var certChain CertificateChain
if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil {
return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest))
}
ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry
ret.Chain = certChain.Entries
case PrecertLogEntryType:
var precertChain PrecertChainEntry
if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil {
return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest))
}
ret.Cert = precertChain.PreCertificate
ret.Chain = precertChain.CertificateChain
default:
// TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types
// are not errors. We should revisit how we process this case.
return nil, fmt.Errorf("unknown entry type: %v", eType)
}
return &ret, nil
}
// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed
// (pre-)certificate.
//
// Note that this function may return a valid LogEntry object and a non-nil
// error value, when the error indicates a non-fatal parsing error.
func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) {
var err error
entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain}
switch eType := rle.Leaf.TimestampedEntry.EntryType; eType {
case X509LogEntryType:
entry.X509Cert, err = rle.Leaf.X509Certificate()
if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse certificate: %v", err)
}
case PrecertLogEntryType:
var tbsCert *x509.Certificate
tbsCert, err = rle.Leaf.Precertificate()
if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse precertificate: %v", err)
}
entry.Precert = &Precertificate{
Submitted: rle.Cert,
IssuerKeyHash: rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
TBSCertificate: tbsCert,
}
default:
return nil, fmt.Errorf("unknown entry type: %v", eType)
}
// err may be non-nil for a non-fatal error.
return &entry, err
}
// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
// after JSON parsing) into a LogEntry object (which includes x509.Certificate
// objects, after TLS and ASN.1 parsing).
//
// Note that this function may return a valid LogEntry object and a non-nil
// error value, when the error indicates a non-fatal parsing error.
func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) {
rle, err := RawLogEntryFromLeaf(index, leaf)
if err != nil {
return nil, err
}
return rle.ToLogEntry()
}
// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds
// since UNIX epoch) to a Go Time.
func TimestampToTime(ts uint64) time.Time {
secs := int64(ts / 1000)
msecs := int64(ts % 1000)
return time.Unix(secs, msecs*1000000)
}
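
As a usage sketch of the helpers above: CreateX509MerkleTreeLeaf builds a V1 MerkleTreeLeaf around a DER certificate, LeafHashForLeaf prefixes it with TreeLeafPrefix and hashes it, and TimestampToTime converts the RFC 6962 millisecond timestamp. The DER bytes and timestamp below are placeholders for illustration:

package main

import (
	"fmt"
	"log"

	ct "github.com/google/certificate-transparency-go"
)

func main() {
	// Placeholder DER bytes; a real leaf would carry a DER-encoded certificate.
	rawDER := []byte{0x30, 0x03, 0x02, 0x01, 0x01}
	timestamp := uint64(1504786523000) // milliseconds since the UNIX epoch

	leaf := ct.CreateX509MerkleTreeLeaf(ct.ASN1Cert{Data: rawDER}, timestamp)
	hash, err := ct.LeafHashForLeaf(leaf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("leaf hash: %x\n", hash)
	fmt.Println("timestamp:", ct.TimestampToTime(timestamp).UTC())
}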


@ -1,112 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ct
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"encoding/pem"
"fmt"
"log"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509"
)
// AllowVerificationWithNonCompliantKeys may be set to true in order to allow
// SignatureVerifier to use keys which are technically non-compliant with
// RFC6962.
var AllowVerificationWithNonCompliantKeys = false
// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error.
func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
p, rest := pem.Decode(b)
if p == nil {
return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
}
k, err := x509.ParsePKIXPublicKey(p.Bytes)
return k, sha256.Sum256(p.Bytes), rest, err
}
// PublicKeyFromB64 parses a base64-encoded public key.
func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) {
der, err := base64.StdEncoding.DecodeString(b64PubKey)
if err != nil {
return nil, fmt.Errorf("error decoding public key: %s", err)
}
return x509.ParsePKIXPublicKey(der)
}
// SignatureVerifier can verify signatures on SCTs and STHs
type SignatureVerifier struct {
pubKey crypto.PublicKey
}
// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
switch pkType := pk.(type) {
case *rsa.PublicKey:
if pkType.N.BitLen() < 2048 {
e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
if !AllowVerificationWithNonCompliantKeys {
return nil, e
}
log.Printf("WARNING: %v", e)
}
case *ecdsa.PublicKey:
params := *(pkType.Params())
if params != *elliptic.P256().Params() {
e := fmt.Errorf("public is ECDSA, but not on the P256 curve")
if !AllowVerificationWithNonCompliantKeys {
return nil, e
}
log.Printf("WARNING: %v", e)
}
default:
return nil, fmt.Errorf("Unsupported public key type %v", pkType)
}
return &SignatureVerifier{
pubKey: pk,
}, nil
}
// VerifySignature verifies the given signature sig matches the data.
func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error {
return tls.VerifySignature(s.pubKey, data, sig)
}
// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry.
func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error {
sctData, err := SerializeSCTSignatureInput(sct, entry)
if err != nil {
return err
}
return s.VerifySignature(sctData, tls.DigitallySigned(sct.Signature))
}
// VerifySTHSignature verifies that the STH's signature is valid.
func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error {
sthData, err := SerializeSTHSignatureInput(sth)
if err != nil {
return err
}
return s.VerifySignature(sthData, tls.DigitallySigned(sth.TreeHeadSignature))
}
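
A sketch of wiring these pieces together. A throwaway P-256 key stands in for a log's public key (NewSignatureVerifier accepts P-256 ECDSA and RSA keys of at least 2048 bits); a real caller would instead pass the log's published key to PublicKeyFromB64 or PublicKeyFromPEM:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/base64"
	"fmt"
	"log"

	ct "github.com/google/certificate-transparency-go"
)

func main() {
	// Generate a stand-in P-256 key and encode it as base64 DER (SPKI),
	// which is the form PublicKeyFromB64 expects.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	der, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	pk, err := ct.PublicKeyFromB64(base64.StdEncoding.EncodeToString(der))
	if err != nil {
		log.Fatal(err)
	}
	v, err := ct.NewSignatureVerifier(pk)
	if err != nil {
		log.Fatal(err)
	}
	// v.VerifySTHSignature / v.VerifySCTSignature would now check real log responses.
	fmt.Printf("verifier %T ready for key %T\n", v, pk)
}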


@ -1,28 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"signature.go",
"tls.go",
"types.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/tls",
importpath = "github.com/google/certificate-transparency-go/tls",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/google/certificate-transparency-go/asn1:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -1,152 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tls
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
_ "crypto/md5" // For registration side-effect
"crypto/rand"
"crypto/rsa"
_ "crypto/sha1" // For registration side-effect
_ "crypto/sha256" // For registration side-effect
_ "crypto/sha512" // For registration side-effect
"errors"
"fmt"
"log"
"math/big"
"github.com/google/certificate-transparency-go/asn1"
)
type dsaSig struct {
R, S *big.Int
}
func generateHash(algo HashAlgorithm, data []byte) ([]byte, crypto.Hash, error) {
var hashType crypto.Hash
switch algo {
case MD5:
hashType = crypto.MD5
case SHA1:
hashType = crypto.SHA1
case SHA224:
hashType = crypto.SHA224
case SHA256:
hashType = crypto.SHA256
case SHA384:
hashType = crypto.SHA384
case SHA512:
hashType = crypto.SHA512
default:
return nil, hashType, fmt.Errorf("unsupported Algorithm.Hash in signature: %v", algo)
}
hasher := hashType.New()
if _, err := hasher.Write(data); err != nil {
return nil, hashType, fmt.Errorf("failed to write to hasher: %v", err)
}
return hasher.Sum([]byte{}), hashType, nil
}
// VerifySignature verifies that the passed in signature over data was created by the given PublicKey.
func VerifySignature(pubKey crypto.PublicKey, data []byte, sig DigitallySigned) error {
hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
if err != nil {
return err
}
switch sig.Algorithm.Signature {
case RSA:
rsaKey, ok := pubKey.(*rsa.PublicKey)
if !ok {
return fmt.Errorf("cannot verify RSA signature with %T key", pubKey)
}
if err := rsa.VerifyPKCS1v15(rsaKey, hashType, hash, sig.Signature); err != nil {
return fmt.Errorf("failed to verify rsa signature: %v", err)
}
case DSA:
dsaKey, ok := pubKey.(*dsa.PublicKey)
if !ok {
return fmt.Errorf("cannot verify DSA signature with %T key", pubKey)
}
var dsaSig dsaSig
rest, err := asn1.Unmarshal(sig.Signature, &dsaSig)
if err != nil {
return fmt.Errorf("failed to unmarshal DSA signature: %v", err)
}
if len(rest) != 0 {
log.Printf("Garbage following signature %v", rest)
}
if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
return errors.New("DSA signature contained zero or negative values")
}
if !dsa.Verify(dsaKey, hash, dsaSig.R, dsaSig.S) {
return errors.New("failed to verify DSA signature")
}
case ECDSA:
ecdsaKey, ok := pubKey.(*ecdsa.PublicKey)
if !ok {
return fmt.Errorf("cannot verify ECDSA signature with %T key", pubKey)
}
var ecdsaSig dsaSig
rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
if err != nil {
return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
}
if len(rest) != 0 {
log.Printf("Garbage following signature %v", rest)
}
if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
return errors.New("ECDSA signature contained zero or negative values")
}
if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
return errors.New("failed to verify ECDSA signature")
}
default:
return fmt.Errorf("unsupported Algorithm.Signature in signature: %v", sig.Algorithm.Hash)
}
return nil
}
// CreateSignature builds a signature over the given data using the specified hash algorithm and private key.
func CreateSignature(privKey crypto.PrivateKey, hashAlgo HashAlgorithm, data []byte) (DigitallySigned, error) {
var sig DigitallySigned
sig.Algorithm.Hash = hashAlgo
hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
if err != nil {
return sig, err
}
switch privKey := privKey.(type) {
case rsa.PrivateKey:
sig.Algorithm.Signature = RSA
sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, &privKey, hashType, hash)
return sig, err
case ecdsa.PrivateKey:
sig.Algorithm.Signature = ECDSA
var ecdsaSig dsaSig
ecdsaSig.R, ecdsaSig.S, err = ecdsa.Sign(rand.Reader, &privKey, hash)
if err != nil {
return sig, err
}
sig.Signature, err = asn1.Marshal(ecdsaSig)
return sig, err
default:
return sig, fmt.Errorf("unsupported private key type %T", privKey)
}
}
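
A round-trip sketch for CreateSignature and VerifySignature. Note that CreateSignature switches on the value types rsa.PrivateKey and ecdsa.PrivateKey, so the private key is passed dereferenced, while VerifySignature expects a pointer public key:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/tls"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	data := []byte("tls-encoded data to sign")

	// Sign with SHA-256; the key is dereferenced to match the ecdsa.PrivateKey case.
	sig, err := tls.CreateSignature(*key, tls.SHA256, data)
	if err != nil {
		log.Fatal(err)
	}
	// Verify against the corresponding public key pointer.
	if err := tls.VerifySignature(&key.PublicKey, data, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("verified %s signature (%d bytes)\n", sig.Algorithm.Signature, len(sig.Signature))
}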


@ -1,711 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tls implements functionality for dealing with TLS-encoded data,
// as defined in RFC 5246. This includes parsing and generation of TLS-encoded
// data, together with utility functions for dealing with the DigitallySigned
// TLS type.
package tls
import (
"bytes"
"encoding/binary"
"fmt"
"reflect"
"strconv"
"strings"
)
// This file holds utility functions for TLS encoding/decoding data
// as per RFC 5246 section 4.
// A structuralError suggests that the TLS data is valid, but the Go type
// which is receiving it doesn't match.
type structuralError struct {
field string
msg string
}
func (e structuralError) Error() string {
var prefix string
if e.field != "" {
prefix = e.field + ": "
}
return "tls: structure error: " + prefix + e.msg
}
// A syntaxError suggests that the TLS data is invalid.
type syntaxError struct {
field string
msg string
}
func (e syntaxError) Error() string {
var prefix string
if e.field != "" {
prefix = e.field + ": "
}
return "tls: syntax error: " + prefix + e.msg
}
// Uint24 is an unsigned 3-byte integer.
type Uint24 uint32
// Enum is an unsigned integer.
type Enum uint64
var (
uint8Type = reflect.TypeOf(uint8(0))
uint16Type = reflect.TypeOf(uint16(0))
uint24Type = reflect.TypeOf(Uint24(0))
uint32Type = reflect.TypeOf(uint32(0))
uint64Type = reflect.TypeOf(uint64(0))
enumType = reflect.TypeOf(Enum(0))
)
// Unmarshal parses the TLS-encoded data in b and uses the reflect package to
// fill in an arbitrary value pointed at by val. Because Unmarshal uses the
// reflect package, the structs being written to must use exported fields
// (upper case names).
//
// The mappings between TLS types and Go types are as follows; some fields
// must have tags (to indicate their encoded size).
//
// TLS Go Required Tags
// opaque byte / uint8
// uint8 byte / uint8
// uint16 uint16
// uint24 tls.Uint24
// uint32 uint32
// uint64 uint64
// enum tls.Enum size:S or maxval:N
// Type<N,M> []Type minlen:N,maxlen:M
// opaque[N] [N]byte / [N]uint8
// uint8[N] [N]byte / [N]uint8
// struct { } struct { }
// select(T) {
// case e1: Type *T selector:Field,val:e1
// }
//
// TLS variants (RFC 5246 s4.6.1) are only supported when the value of the
// associated enumeration type is available earlier in the same enclosing
// struct, and each possible variant is marked with a selector tag (to
// indicate which field selects the variants) and a val tag (to indicate
// what value of the selector picks this particular field).
//
// For example, a TLS structure:
//
// enum { e1(1), e2(2) } EnumType;
// struct {
// EnumType sel;
// select(sel) {
// case e1: uint16
// case e2: uint32
// } data;
// } VariantItem;
//
// would have a corresponding Go type:
//
// type VariantItem struct {
// Sel tls.Enum `tls:"maxval:2"`
// Data16 *uint16 `tls:"selector:Sel,val:1"`
// Data32 *uint32 `tls:"selector:Sel,val:2"`
// }
//
// TLS fixed-length vectors of types other than opaque or uint8 are not supported.
//
// For TLS variable-length vectors that are themselves used in other vectors,
// create a single-field structure to represent the inner type. For example, for:
//
// opaque InnerType<1..65535>;
// struct {
// InnerType inners<1,65535>;
// } Something;
//
// convert to:
//
// type InnerType struct {
// Val []byte `tls:"minlen:1,maxlen:65535"`
// }
// type Something struct {
// Inners []InnerType `tls:"minlen:1,maxlen:65535"`
// }
//
// If the encoded value does not fit in the Go type, Unmarshal returns a parse error.
func Unmarshal(b []byte, val interface{}) ([]byte, error) {
return UnmarshalWithParams(b, val, "")
}
// UnmarshalWithParams allows field parameters to be specified for the
// top-level element. The form of the params is the same as the field tags.
func UnmarshalWithParams(b []byte, val interface{}, params string) ([]byte, error) {
info, err := fieldTagToFieldInfo(params, "")
if err != nil {
return nil, err
}
// The passed in interface{} is a pointer (to allow the value to be written
// to); extract the pointed-to object as a reflect.Value, so parseField
// can do various introspection things.
v := reflect.ValueOf(val).Elem()
offset, err := parseField(v, b, 0, info)
if err != nil {
return nil, err
}
return b[offset:], nil
}
// Return the number of bytes needed to encode values up to (and including) x.
func byteCount(x uint64) uint {
switch {
case x < 0x100:
return 1
case x < 0x10000:
return 2
case x < 0x1000000:
return 3
case x < 0x100000000:
return 4
case x < 0x10000000000:
return 5
case x < 0x1000000000000:
return 6
case x < 0x100000000000000:
return 7
default:
return 8
}
}
type fieldInfo struct {
count uint // Number of bytes
countSet bool
minlen uint64 // Only relevant for slices
maxlen uint64 // Only relevant for slices
selector string // Only relevant for select sub-values
val uint64 // Only relevant for select sub-values
name string // Used for better error messages
}
func (i *fieldInfo) fieldName() string {
if i == nil {
return ""
}
return i.name
}
// Given a tag string, return a fieldInfo describing the field.
func fieldTagToFieldInfo(str string, name string) (*fieldInfo, error) {
var info *fieldInfo
// Iterate over clauses in the tag, ignoring any that don't parse properly.
for _, part := range strings.Split(str, ",") {
switch {
case strings.HasPrefix(part, "maxval:"):
if v, err := strconv.ParseUint(part[7:], 10, 64); err == nil {
info = &fieldInfo{count: byteCount(v), countSet: true}
}
case strings.HasPrefix(part, "size:"):
if sz, err := strconv.ParseUint(part[5:], 10, 32); err == nil {
info = &fieldInfo{count: uint(sz), countSet: true}
}
case strings.HasPrefix(part, "maxlen:"):
v, err := strconv.ParseUint(part[7:], 10, 64)
if err != nil {
continue
}
if info == nil {
info = &fieldInfo{}
}
info.count = byteCount(v)
info.countSet = true
info.maxlen = v
case strings.HasPrefix(part, "minlen:"):
v, err := strconv.ParseUint(part[7:], 10, 64)
if err != nil {
continue
}
if info == nil {
info = &fieldInfo{}
}
info.minlen = v
case strings.HasPrefix(part, "selector:"):
if info == nil {
info = &fieldInfo{}
}
info.selector = part[9:]
case strings.HasPrefix(part, "val:"):
v, err := strconv.ParseUint(part[4:], 10, 64)
if err != nil {
continue
}
if info == nil {
info = &fieldInfo{}
}
info.val = v
}
}
if info != nil {
info.name = name
if info.selector == "" {
if info.count < 1 {
return nil, structuralError{name, "field of unknown size in " + str}
} else if info.count > 8 {
return nil, structuralError{name, "specified size too large in " + str}
} else if info.minlen > info.maxlen {
return nil, structuralError{name, "specified length range inverted in " + str}
} else if info.val > 0 {
return nil, structuralError{name, "specified selector value but not field in " + str}
}
}
} else if name != "" {
info = &fieldInfo{name: name}
}
return info, nil
}
// Check that a value fits into a field described by a fieldInfo structure.
func (i fieldInfo) check(val uint64, fldName string) error {
if val >= (1 << (8 * i.count)) {
return structuralError{fldName, fmt.Sprintf("value %d too large for size", val)}
}
if i.maxlen != 0 {
if val < i.minlen {
return structuralError{fldName, fmt.Sprintf("value %d too small for minimum %d", val, i.minlen)}
}
if val > i.maxlen {
return structuralError{fldName, fmt.Sprintf("value %d too large for maximum %d", val, i.maxlen)}
}
}
return nil
}
// readVarUint reads a big-endian unsigned integer of the given size in
// bytes.
func readVarUint(data []byte, info *fieldInfo) (uint64, error) {
if info == nil || !info.countSet {
return 0, structuralError{info.fieldName(), "no field size information available"}
}
if len(data) < int(info.count) {
return 0, syntaxError{info.fieldName(), "truncated variable-length integer"}
}
var result uint64
for i := uint(0); i < info.count; i++ {
result = (result << 8) | uint64(data[i])
}
if err := info.check(result, info.name); err != nil {
return 0, err
}
return result, nil
}
// parseField is the main parsing function. Given a byte slice and an offset
// (in bytes) into the data, it will try to parse a suitable TLS value out
// and store it in the given Value.
func parseField(v reflect.Value, data []byte, initOffset int, info *fieldInfo) (int, error) {
offset := initOffset
rest := data[offset:]
fieldType := v.Type()
// First look for known fixed types.
switch fieldType {
case uint8Type:
if len(rest) < 1 {
return offset, syntaxError{info.fieldName(), "truncated uint8"}
}
v.SetUint(uint64(rest[0]))
offset++
return offset, nil
case uint16Type:
if len(rest) < 2 {
return offset, syntaxError{info.fieldName(), "truncated uint16"}
}
v.SetUint(uint64(binary.BigEndian.Uint16(rest)))
offset += 2
return offset, nil
case uint24Type:
if len(rest) < 3 {
return offset, syntaxError{info.fieldName(), "truncated uint24"}
}
v.SetUint(uint64(data[0])<<16 | uint64(data[1])<<8 | uint64(data[2]))
offset += 3
return offset, nil
case uint32Type:
if len(rest) < 4 {
return offset, syntaxError{info.fieldName(), "truncated uint32"}
}
v.SetUint(uint64(binary.BigEndian.Uint32(rest)))
offset += 4
return offset, nil
case uint64Type:
if len(rest) < 8 {
return offset, syntaxError{info.fieldName(), "truncated uint64"}
}
v.SetUint(uint64(binary.BigEndian.Uint64(rest)))
offset += 8
return offset, nil
}
// Now deal with user-defined types.
switch v.Kind() {
case enumType.Kind():
// Assume that anything of the same kind as Enum is an Enum, so that
// users can alias types of their own to Enum.
val, err := readVarUint(rest, info)
if err != nil {
return offset, err
}
v.SetUint(val)
offset += int(info.count)
return offset, nil
case reflect.Struct:
structType := fieldType
// TLS includes a select(Enum) {..} construct, where the value of an enum
// indicates which variant field is present (like a C union). We require
// that the enum value be an earlier field in the same structure (the selector),
// and that each of the possible variant destination fields be pointers.
// So the Go mapping looks like:
// type variantType struct {
// Which tls.Enum `tls:"size:1"` // this is the selector
// Val1 *type1 `tls:"selector:Which,val:1"` // this is a destination
// Val2 *type2 `tls:"selector:Which,val:2"` // this is a destination
// }
// To deal with this, we track any enum-like fields and their values...
enums := make(map[string]uint64)
// .. and we track which selector names we've seen (in the destination field tags),
// and whether a destination for that selector has been chosen.
selectorSeen := make(map[string]bool)
for i := 0; i < structType.NumField(); i++ {
// Find information about this field.
tag := structType.Field(i).Tag.Get("tls")
fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
if err != nil {
return offset, err
}
destination := v.Field(i)
if fieldInfo.selector != "" {
// This is a possible select(Enum) destination, so first check that the referenced
// selector field has already been seen earlier in the struct.
choice, ok := enums[fieldInfo.selector]
if !ok {
return offset, structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
}
if structType.Field(i).Type.Kind() != reflect.Ptr {
return offset, structuralError{fieldInfo.name, "choice field not a pointer type"}
}
// Is this the first mention of the selector field name? If so, remember it.
seen, ok := selectorSeen[fieldInfo.selector]
if !ok {
selectorSeen[fieldInfo.selector] = false
}
if choice != fieldInfo.val {
// This destination field was not the chosen one, so make it nil (we checked
// it was a pointer above).
v.Field(i).Set(reflect.Zero(structType.Field(i).Type))
continue
}
if seen {
// We already saw a different destination field receive the value for this
// selector value, which indicates a badly annotated structure.
return offset, structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
}
selectorSeen[fieldInfo.selector] = true
// Make an object of the pointed-to type and parse into that.
v.Field(i).Set(reflect.New(structType.Field(i).Type.Elem()))
destination = v.Field(i).Elem()
}
offset, err = parseField(destination, data, offset, fieldInfo)
if err != nil {
return offset, err
}
// Remember any possible tls.Enum values encountered in case they are selectors.
if structType.Field(i).Type.Kind() == enumType.Kind() {
enums[structType.Field(i).Name] = v.Field(i).Uint()
}
}
// Now we have seen all fields in the structure, check that all select(Enum) {..} selector
// fields found a destination to put their data in.
for selector, seen := range selectorSeen {
if !seen {
return offset, syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
}
}
return offset, nil
case reflect.Array:
datalen := v.Len()
if datalen > len(rest) {
return offset, syntaxError{info.fieldName(), "truncated array"}
}
inner := rest[:datalen]
offset += datalen
if fieldType.Elem().Kind() != reflect.Uint8 {
// Only byte/uint8 arrays are supported
return offset, structuralError{info.fieldName(), "unsupported array type: " + v.Type().String()}
}
reflect.Copy(v, reflect.ValueOf(inner))
return offset, nil
case reflect.Slice:
sliceType := fieldType
// Slices represent variable-length vectors, which are prefixed by a length field.
// The fieldInfo indicates the size of that length field.
varlen, err := readVarUint(rest, info)
if err != nil {
return offset, err
}
datalen := int(varlen)
offset += int(info.count)
rest = rest[info.count:]
if datalen > len(rest) {
return offset, syntaxError{info.fieldName(), "truncated slice"}
}
inner := rest[:datalen]
offset += datalen
if fieldType.Elem().Kind() == reflect.Uint8 {
// Fast version for []byte
v.Set(reflect.MakeSlice(sliceType, datalen, datalen))
reflect.Copy(v, reflect.ValueOf(inner))
return offset, nil
}
v.Set(reflect.MakeSlice(sliceType, 0, datalen))
single := reflect.New(sliceType.Elem())
for innerOffset := 0; innerOffset < len(inner); {
var err error
innerOffset, err = parseField(single.Elem(), inner, innerOffset, nil)
if err != nil {
return offset, err
}
v.Set(reflect.Append(v, single.Elem()))
}
return offset, nil
default:
return offset, structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
}
}
// Marshal returns the TLS encoding of val.
func Marshal(val interface{}) ([]byte, error) {
return MarshalWithParams(val, "")
}
// MarshalWithParams returns the TLS encoding of val, and allows field
// parameters to be specified for the top-level element. The form
// of the params is the same as the field tags.
func MarshalWithParams(val interface{}, params string) ([]byte, error) {
info, err := fieldTagToFieldInfo(params, "")
if err != nil {
return nil, err
}
var out bytes.Buffer
v := reflect.ValueOf(val)
if err := marshalField(&out, v, info); err != nil {
return nil, err
}
return out.Bytes(), err
}
func marshalField(out *bytes.Buffer, v reflect.Value, info *fieldInfo) error {
var prefix string
if info != nil && len(info.name) > 0 {
prefix = info.name + ": "
}
fieldType := v.Type()
// First look for known fixed types.
switch fieldType {
case uint8Type:
out.WriteByte(byte(v.Uint()))
return nil
case uint16Type:
scratch := make([]byte, 2)
binary.BigEndian.PutUint16(scratch, uint16(v.Uint()))
out.Write(scratch)
return nil
case uint24Type:
i := v.Uint()
if i > 0xffffff {
return structuralError{info.fieldName(), fmt.Sprintf("uint24 overflow %d", i)}
}
scratch := make([]byte, 4)
binary.BigEndian.PutUint32(scratch, uint32(i))
out.Write(scratch[1:])
return nil
case uint32Type:
scratch := make([]byte, 4)
binary.BigEndian.PutUint32(scratch, uint32(v.Uint()))
out.Write(scratch)
return nil
case uint64Type:
scratch := make([]byte, 8)
binary.BigEndian.PutUint64(scratch, uint64(v.Uint()))
out.Write(scratch)
return nil
}
// Now deal with user-defined types.
switch v.Kind() {
case enumType.Kind():
i := v.Uint()
if info == nil {
return structuralError{info.fieldName(), "enum field tag missing"}
}
if err := info.check(i, prefix); err != nil {
return err
}
scratch := make([]byte, 8)
binary.BigEndian.PutUint64(scratch, uint64(i))
out.Write(scratch[(8 - info.count):])
return nil
case reflect.Struct:
structType := fieldType
enums := make(map[string]uint64) // Values of any Enum fields
// The comment on parseField() describes the mapping of the TLS select(Enum) {..} construct;
// here we have selector and source (rather than destination) fields.
// Track which selector names we've seen (in the source field tags), and whether a source
// value for that selector has been processed.
selectorSeen := make(map[string]bool)
for i := 0; i < structType.NumField(); i++ {
// Find information about this field.
tag := structType.Field(i).Tag.Get("tls")
fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
if err != nil {
return err
}
source := v.Field(i)
if fieldInfo.selector != "" {
// This field is a possible source for a select(Enum) {..}. First check
// the selector field name has been seen.
choice, ok := enums[fieldInfo.selector]
if !ok {
return structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
}
if structType.Field(i).Type.Kind() != reflect.Ptr {
return structuralError{fieldInfo.name, "choice field not a pointer type"}
}
// Is this the first mention of the selector field name? If so, remember it.
seen, ok := selectorSeen[fieldInfo.selector]
if !ok {
selectorSeen[fieldInfo.selector] = false
}
if choice != fieldInfo.val {
// This source was not chosen; police that it should be nil.
if v.Field(i).Pointer() != uintptr(0) {
return structuralError{fieldInfo.name, "unchosen field is non-nil"}
}
continue
}
if seen {
// We already saw a different source field generate the value for this
// selector value, which indicates a badly annotated structure.
return structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
}
selectorSeen[fieldInfo.selector] = true
if v.Field(i).Pointer() == uintptr(0) {
return structuralError{fieldInfo.name, "chosen field is nil"}
}
// Marshal from the pointed-to source object.
source = v.Field(i).Elem()
}
var fieldData bytes.Buffer
if err := marshalField(&fieldData, source, fieldInfo); err != nil {
return err
}
out.Write(fieldData.Bytes())
// Remember any tls.Enum values encountered in case they are selectors.
if structType.Field(i).Type.Kind() == enumType.Kind() {
enums[structType.Field(i).Name] = v.Field(i).Uint()
}
}
// Now we have seen all fields in the structure, check that all select(Enum) {..} selector
// fields found a source field to get their data from.
for selector, seen := range selectorSeen {
if !seen {
return syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
}
}
return nil
case reflect.Array:
datalen := v.Len()
arrayType := fieldType
if arrayType.Elem().Kind() != reflect.Uint8 {
// Only byte/uint8 arrays are supported
return structuralError{info.fieldName(), "unsupported array type"}
}
bytes := make([]byte, datalen)
for i := 0; i < datalen; i++ {
bytes[i] = uint8(v.Index(i).Uint())
}
_, err := out.Write(bytes)
return err
case reflect.Slice:
if info == nil {
return structuralError{info.fieldName(), "slice field tag missing"}
}
sliceType := fieldType
if sliceType.Elem().Kind() == reflect.Uint8 {
// Fast version for []byte: first write the length as info.count bytes.
datalen := v.Len()
scratch := make([]byte, 8)
binary.BigEndian.PutUint64(scratch, uint64(datalen))
out.Write(scratch[(8 - info.count):])
if err := info.check(uint64(datalen), prefix); err != nil {
return err
}
// Then just write the data.
bytes := make([]byte, datalen)
for i := 0; i < datalen; i++ {
bytes[i] = uint8(v.Index(i).Uint())
}
_, err := out.Write(bytes)
return err
}
// General version: use a separate Buffer to write the slice entries into.
var innerBuf bytes.Buffer
for i := 0; i < v.Len(); i++ {
if err := marshalField(&innerBuf, v.Index(i), nil); err != nil {
return err
}
}
// Now insert (and check) the size.
size := uint64(innerBuf.Len())
if err := info.check(size, prefix); err != nil {
return err
}
scratch := make([]byte, 8)
binary.BigEndian.PutUint64(scratch, size)
out.Write(scratch[(8 - info.count):])
// Then copy the data.
_, err := out.Write(innerBuf.Bytes())
return err
default:
return structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
}
}
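
A round-trip sketch for Marshal and Unmarshal using the VariantItem shape from the Unmarshal doc comment above; the one-byte selector is followed by the chosen variant's encoding:

package main

import (
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/tls"
)

// VariantItem mirrors the select(Enum) example in the Unmarshal documentation.
type VariantItem struct {
	Sel    tls.Enum `tls:"maxval:2"`
	Data16 *uint16  `tls:"selector:Sel,val:1"`
	Data32 *uint32  `tls:"selector:Sel,val:2"`
}

func main() {
	v := uint16(0xbeef)
	item := VariantItem{Sel: 1, Data16: &v} // Sel=1 selects Data16; Data32 must stay nil

	b, err := tls.Marshal(item)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded: %x\n", b) // 01beef: selector byte, then the uint16 value

	var decoded VariantItem
	rest, err := tls.Unmarshal(b, &decoded)
	if err != nil || len(rest) != 0 {
		log.Fatalf("unmarshal: %v (trailing %d bytes)", err, len(rest))
	}
	fmt.Printf("decoded Data16: %#x\n", *decoded.Data16)
}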


@ -1,117 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tls
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"fmt"
)
// DigitallySigned gives information about a signature, including the algorithm used
// and the signature value. Defined in RFC 5246 s4.7.
type DigitallySigned struct {
Algorithm SignatureAndHashAlgorithm
Signature []byte `tls:"minlen:0,maxlen:65535"`
}
func (d DigitallySigned) String() string {
return fmt.Sprintf("Signature: HashAlgo=%v SignAlgo=%v Value=%x", d.Algorithm.Hash, d.Algorithm.Signature, d.Signature)
}
// SignatureAndHashAlgorithm gives information about the algorithms used for a
// signature. Defined in RFC 5246 s7.4.1.4.1.
type SignatureAndHashAlgorithm struct {
Hash HashAlgorithm `tls:"maxval:255"`
Signature SignatureAlgorithm `tls:"maxval:255"`
}
// HashAlgorithm enum from RFC 5246 s7.4.1.4.1.
type HashAlgorithm Enum
// HashAlgorithm constants from RFC 5246 s7.4.1.4.1.
const (
None HashAlgorithm = 0
MD5 HashAlgorithm = 1
SHA1 HashAlgorithm = 2
SHA224 HashAlgorithm = 3
SHA256 HashAlgorithm = 4
SHA384 HashAlgorithm = 5
SHA512 HashAlgorithm = 6
)
func (h HashAlgorithm) String() string {
switch h {
case None:
return "None"
case MD5:
return "MD5"
case SHA1:
return "SHA1"
case SHA224:
return "SHA224"
case SHA256:
return "SHA256"
case SHA384:
return "SHA384"
case SHA512:
return "SHA512"
default:
return fmt.Sprintf("UNKNOWN(%d)", h)
}
}
// SignatureAlgorithm enum from RFC 5246 s7.4.1.4.1.
type SignatureAlgorithm Enum
// SignatureAlgorithm constants from RFC 5246 s7.4.1.4.1.
const (
Anonymous SignatureAlgorithm = 0
RSA SignatureAlgorithm = 1
DSA SignatureAlgorithm = 2
ECDSA SignatureAlgorithm = 3
)
func (s SignatureAlgorithm) String() string {
switch s {
case Anonymous:
return "Anonymous"
case RSA:
return "RSA"
case DSA:
return "DSA"
case ECDSA:
return "ECDSA"
default:
return fmt.Sprintf("UNKNOWN(%d)", s)
}
}
// SignatureAlgorithmFromPubKey returns the algorithm used for this public key.
// ECDSA, RSA, and DSA keys are supported. Other key types will return Anonymous.
func SignatureAlgorithmFromPubKey(k crypto.PublicKey) SignatureAlgorithm {
switch k.(type) {
case *ecdsa.PublicKey:
return ECDSA
case *rsa.PublicKey:
return RSA
case *dsa.PublicKey:
return DSA
default:
return Anonymous
}
}
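
A small sketch of the types above: SignatureAlgorithmFromPubKey maps a Go public key to the RFC 5246 enum, and DigitallySigned's String method renders both algorithm fields (the generated key and the signature bytes are placeholders):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/tls"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Pointer key types are what SignatureAlgorithmFromPubKey switches on.
	fmt.Println(tls.SignatureAlgorithmFromPubKey(&key.PublicKey)) // ECDSA
	fmt.Println(tls.SignatureAlgorithmFromPubKey("not a key"))    // Anonymous

	ds := tls.DigitallySigned{
		Algorithm: tls.SignatureAndHashAlgorithm{Hash: tls.SHA256, Signature: tls.ECDSA},
		Signature: []byte{0x01, 0x02}, // placeholder signature bytes
	}
	fmt.Println(ds) // Signature: HashAlgo=SHA256 SignAlgo=ECDSA Value=0102
}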


@ -1,528 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ct holds core types and utilities for Certificate Transparency.
package ct
import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509"
)
///////////////////////////////////////////////////////////////////////////////
// The following structures represent those outlined in RFC6962; any section
// numbers mentioned refer to that RFC.
///////////////////////////////////////////////////////////////////////////////
// LogEntryType represents the LogEntryType enum from section 3.1:
// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType;
type LogEntryType tls.Enum // tls:"maxval:65535"
// LogEntryType constants from section 3.1.
const (
X509LogEntryType LogEntryType = 0
PrecertLogEntryType LogEntryType = 1
XJSONLogEntryType LogEntryType = 0x8000 // Experimental. Don't rely on this!
)
func (e LogEntryType) String() string {
switch e {
case X509LogEntryType:
return "X509LogEntryType"
case PrecertLogEntryType:
return "PrecertLogEntryType"
case XJSONLogEntryType:
return "XJSONLogEntryType"
default:
return fmt.Sprintf("UnknownEntryType(%d)", e)
}
}
// RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance.
const (
TreeLeafPrefix = byte(0x00)
TreeNodePrefix = byte(0x01)
)
// MerkleLeafType represents the MerkleLeafType enum from section 3.4:
// enum { timestamped_entry(0), (255) } MerkleLeafType;
type MerkleLeafType tls.Enum // tls:"maxval:255"
// TimestampedEntryLeafType is the only defined MerkleLeafType constant from section 3.4.
const TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT
func (m MerkleLeafType) String() string {
switch m {
case TimestampedEntryLeafType:
return "TimestampedEntryLeafType"
default:
return fmt.Sprintf("UnknownLeafType(%d)", m)
}
}
// Version represents the Version enum from section 3.2:
// enum { v1(0), (255) } Version;
type Version tls.Enum // tls:"maxval:255"
// CT Version constants from section 3.2.
const (
V1 Version = 0
)
func (v Version) String() string {
switch v {
case V1:
return "V1"
default:
return fmt.Sprintf("UnknownVersion(%d)", v)
}
}
// SignatureType differentiates STH signatures from SCT signatures, see section 3.2.
// enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType;
type SignatureType tls.Enum // tls:"maxval:255"
// SignatureType constants from section 3.2.
const (
CertificateTimestampSignatureType SignatureType = 0
TreeHashSignatureType SignatureType = 1
)
func (st SignatureType) String() string {
switch st {
case CertificateTimestampSignatureType:
return "CertificateTimestamp"
case TreeHashSignatureType:
return "TreeHash"
default:
return fmt.Sprintf("UnknownSignatureType(%d)", st)
}
}
// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate
// (section 3.1).
type ASN1Cert struct {
Data []byte `tls:"minlen:1,maxlen:16777215"`
}
// LogID holds the hash of the Log's public key (section 3.2).
// TODO(pphaneuf): Users should be migrated to the one in the logid package.
type LogID struct {
KeyID [sha256.Size]byte
}
// PreCert represents a Precertificate (section 3.2).
type PreCert struct {
IssuerKeyHash [sha256.Size]byte
TBSCertificate []byte `tls:"minlen:1,maxlen:16777215"` // DER-encoded TBSCertificate
}
// CTExtensions is a representation of the raw bytes of any CtExtension
// structure (see section 3.2).
// nolint: golint
type CTExtensions []byte // tls:"minlen:0,maxlen:65535"`
// MerkleTreeNode represents an internal node in the CT tree.
type MerkleTreeNode []byte
// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and
// 4.4).
type ConsistencyProof []MerkleTreeNode
// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5).
type AuditPath []MerkleTreeNode
// LeafInput represents a serialized MerkleTreeLeaf structure.
type LeafInput []byte
// DigitallySigned is a local alias for tls.DigitallySigned so that we can
// attach a MarshalJSON method.
type DigitallySigned tls.DigitallySigned
// FromBase64String populates the DigitallySigned structure from the base64 data passed in.
// Returns an error if the base64 data is invalid.
func (d *DigitallySigned) FromBase64String(b64 string) error {
raw, err := base64.StdEncoding.DecodeString(b64)
if err != nil {
return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err)
}
var ds tls.DigitallySigned
if rest, err := tls.Unmarshal(raw, &ds); err != nil {
return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
} else if len(rest) > 0 {
return fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
}
*d = DigitallySigned(ds)
return nil
}
// Base64String returns the base64 representation of the DigitallySigned struct.
func (d DigitallySigned) Base64String() (string, error) {
b, err := tls.Marshal(d)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(b), nil
}
// MarshalJSON implements the json.Marshaller interface.
func (d DigitallySigned) MarshalJSON() ([]byte, error) {
b64, err := d.Base64String()
if err != nil {
return []byte{}, err
}
return []byte(`"` + b64 + `"`), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
var content string
if err := json.Unmarshal(b, &content); err != nil {
return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
}
return d.FromBase64String(content)
}
// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log.
type RawLogEntry struct {
// Index is a position of the entry in the log.
Index int64
// Leaf is a parsed Merkle leaf hash input.
Leaf MerkleTreeLeaf
// Cert is:
// - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType.
// - A precertificate if Leaf.TimestampedEntry.EntryType is
// PrecertLogEntryType, in the form of a DER-encoded Certificate as
// originally added (which includes the poison extension and a signature
// generated over the pre-cert by the pre-cert issuer).
// - Empty otherwise.
Cert ASN1Cert
// Chain is the issuing certificate chain starting with the issuer of Cert,
// or an empty slice if Cert is empty.
Chain []ASN1Cert
}
// LogEntry represents the (parsed) contents of an entry in a CT log. This is described
// in section 3.1, but note that this structure does *not* match the TLS structure
// defined there (the TLS structure is never used directly in RFC6962).
type LogEntry struct {
Index int64
Leaf MerkleTreeLeaf
// Exactly one of the following three fields should be non-empty.
X509Cert *x509.Certificate // Parsed X.509 certificate
Precert *Precertificate // Extracted precertificate
JSONData []byte
// Chain holds the issuing certificate chain, starting with the
// issuer of the leaf certificate / pre-certificate.
Chain []ASN1Cert
}
// PrecertChainEntry holds a precertificate together with a validation chain
// for it; see section 3.1.
type PrecertChainEntry struct {
PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"`
CertificateChain []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
}
// CertificateChain holds a chain of certificates, as returned as extra data
// for get-entries (section 4.6).
type CertificateChain struct {
Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
}
// JSONDataEntry holds arbitrary data.
type JSONDataEntry struct {
Data []byte `tls:"minlen:0,maxlen:16777215"`
}
// SHA256Hash represents the output from the SHA256 hash function.
type SHA256Hash [sha256.Size]byte
// FromBase64String populates the SHA256Hash struct with the contents of the base64 data passed in.
func (s *SHA256Hash) FromBase64String(b64 string) error {
bs, err := base64.StdEncoding.DecodeString(b64)
if err != nil {
return fmt.Errorf("failed to unbase64 LogID: %v", err)
}
if len(bs) != sha256.Size {
return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
}
copy(s[:], bs)
return nil
}
// Base64String returns the base64 representation of this SHA256Hash.
func (s SHA256Hash) Base64String() string {
return base64.StdEncoding.EncodeToString(s[:])
}
// MarshalJSON implements the json.Marshaller interface for SHA256Hash.
func (s SHA256Hash) MarshalJSON() ([]byte, error) {
return []byte(`"` + s.Base64String() + `"`), nil
}
// UnmarshalJSON implements the json.Unmarshaller interface.
func (s *SHA256Hash) UnmarshalJSON(b []byte) error {
var content string
if err := json.Unmarshal(b, &content); err != nil {
return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err)
}
return s.FromBase64String(content)
}
// SignedTreeHead represents the structure returned by the get-sth CT method
// after base64 decoding; see sections 3.5 and 4.3.
type SignedTreeHead struct {
Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms
TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree
Timestamp uint64 `json:"timestamp"` // The time at which the STH was created
SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree
TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // Log's signature over a TLS-encoded TreeHeadSignature
LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
}
// TreeHeadSignature holds the data over which the signature in an STH is
// generated; see section 3.5
type TreeHeadSignature struct {
Version Version `tls:"maxval:255"`
SignatureType SignatureType `tls:"maxval:255"` // == TreeHashSignatureType
Timestamp uint64
TreeSize uint64
SHA256RootHash SHA256Hash
}
// SignedCertificateTimestamp represents the structure returned by the
// add-chain and add-pre-chain methods after base64 decoding; see sections
// 3.2, 4.1 and 4.2.
type SignedCertificateTimestamp struct {
SCTVersion Version `tls:"maxval:255"`
LogID LogID
Timestamp uint64
Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
Signature DigitallySigned // Signature over TLS-encoded CertificateTimestamp
}
// CertificateTimestamp is the collection of data that the signature in an
// SCT is over; see section 3.2.
type CertificateTimestamp struct {
SCTVersion Version `tls:"maxval:255"`
SignatureType SignatureType `tls:"maxval:255"`
Timestamp uint64
EntryType LogEntryType `tls:"maxval:65535"`
X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"`
PrecertEntry *PreCert `tls:"selector:EntryType,val:1"`
JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"`
Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
}
func (s SignedCertificateTimestamp) String() string {
return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion,
base64.StdEncoding.EncodeToString(s.LogID.KeyID[:]),
s.Timestamp,
s.Extensions,
s.Signature)
}
// TimestampedEntry is part of the MerkleTreeLeaf structure; see section 3.4.
type TimestampedEntry struct {
Timestamp uint64
EntryType LogEntryType `tls:"maxval:65535"`
X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"`
PrecertEntry *PreCert `tls:"selector:EntryType,val:1"`
JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"`
Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
}
// MerkleTreeLeaf represents the deserialized structure of the hash input for the
// leaves of a log's Merkle tree; see section 3.4.
type MerkleTreeLeaf struct {
Version Version `tls:"maxval:255"`
LeafType MerkleLeafType `tls:"maxval:255"`
TimestampedEntry *TimestampedEntry `tls:"selector:LeafType,val:0"`
}
// Precertificate represents the parsed CT Precertificate structure.
type Precertificate struct {
// DER-encoded pre-certificate as originally added, which includes a
// poison extension and a signature generated over the pre-cert by
// the pre-cert issuer (which might differ from the issuer of the final
// cert, see RFC6962 s3.1).
Submitted ASN1Cert
// SHA256 hash of the issuing key
IssuerKeyHash [sha256.Size]byte
// Parsed TBSCertificate structure, held in an x509.Certificate for convenience.
TBSCertificate *x509.Certificate
}
// X509Certificate returns the X.509 Certificate contained within the
// MerkleTreeLeaf.
func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) {
if m.TimestampedEntry.EntryType != X509LogEntryType {
return nil, fmt.Errorf("cannot call X509Certificate on a MerkleTreeLeaf that is not an X509 entry")
}
return x509.ParseCertificate(m.TimestampedEntry.X509Entry.Data)
}
// Precertificate returns the X.509 Precertificate contained within the MerkleTreeLeaf.
//
// The returned precertificate is embedded in an x509.Certificate, but is in the
// form stored internally in the log rather than the original submitted form
// (i.e. it does not include the poison extension and any changes to reflect the
// final certificate's issuer have been made; see x509.BuildPrecertTBS).
func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) {
if m.TimestampedEntry.EntryType != PrecertLogEntryType {
return nil, fmt.Errorf("cannot call Precertificate on a MerkleTreeLeaf that is not a precert entry")
}
return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate)
}
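// Illustrative sketch (not part of the original file): choosing between the two
// accessors above based on the leaf's entry type. This is hypothetical example
// code for a _test.go file of this package; `leaf` is an already-deserialized
// MerkleTreeLeaf and "fmt" is assumed to be available.
func exampleLeafCertificate(leaf *MerkleTreeLeaf) {
	switch leaf.TimestampedEntry.EntryType {
	case X509LogEntryType:
		cert, err := leaf.X509Certificate()
		if err != nil {
			fmt.Printf("bad X.509 entry: %v\n", err)
			return
		}
		fmt.Printf("certificate for %q\n", cert.Subject.CommonName)
	case PrecertLogEntryType:
		precert, err := leaf.Precertificate()
		if err != nil {
			fmt.Printf("bad precert entry: %v\n", err)
			return
		}
		fmt.Printf("precertificate for %q\n", precert.Subject.CommonName)
	}
}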
// APIEndpoint is a string that represents one of the Certificate Transparency
// Log API endpoints.
type APIEndpoint string
// Certificate Transparency Log API endpoints; see section 4.
// WARNING: these should match the URI paths below without the "/ct/v1/" prefix; if
// you change these constants, you may need to change those paths too.
const (
AddChainStr APIEndpoint = "add-chain"
AddPreChainStr APIEndpoint = "add-pre-chain"
GetSTHStr APIEndpoint = "get-sth"
GetEntriesStr APIEndpoint = "get-entries"
GetProofByHashStr APIEndpoint = "get-proof-by-hash"
GetSTHConsistencyStr APIEndpoint = "get-sth-consistency"
GetRootsStr APIEndpoint = "get-roots"
GetEntryAndProofStr APIEndpoint = "get-entry-and-proof"
)
// URI paths for Log requests; see section 4.
// WARNING: these should match the API endpoints above, with the "/ct/v1/" prefix added; if
// you change these constants, you may need to change those endpoints too.
const (
AddChainPath = "/ct/v1/add-chain"
AddPreChainPath = "/ct/v1/add-pre-chain"
GetSTHPath = "/ct/v1/get-sth"
GetEntriesPath = "/ct/v1/get-entries"
GetProofByHashPath = "/ct/v1/get-proof-by-hash"
GetSTHConsistencyPath = "/ct/v1/get-sth-consistency"
GetRootsPath = "/ct/v1/get-roots"
GetEntryAndProofPath = "/ct/v1/get-entry-and-proof"
AddJSONPath = "/ct/v1/add-json" // Experimental addition
)
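// Illustrative sketch (not part of the original file): composing a full request
// URL from a log's base URI and one of the path constants above. The base URI is
// hypothetical and assumed not to end with a trailing "/".
func exampleGetSTHURL() string {
	const logBase = "https://ct.example.com" // hypothetical log base URI
	return logBase + GetSTHPath              // "https://ct.example.com/ct/v1/get-sth"
}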
// AddChainRequest represents the JSON request body sent to the add-chain and
// add-pre-chain POST methods from sections 4.1 and 4.2.
type AddChainRequest struct {
Chain [][]byte `json:"chain"`
}
// AddChainResponse represents the JSON response to the add-chain and
// add-pre-chain POST methods.
// An SCT represents a Log's promise to integrate a [pre-]certificate into the
// log within a defined period of time.
type AddChainResponse struct {
SCTVersion Version `json:"sct_version"` // SCT structure version
ID []byte `json:"id"` // Log ID
Timestamp uint64 `json:"timestamp"` // Timestamp of issuance
Extensions string `json:"extensions"` // Holder for any CT extensions
Signature []byte `json:"signature"` // Log signature for this SCT
}
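// Illustrative sketch (not part of the original file): building the JSON body of
// an add-chain call from DER-encoded certificates. Hypothetical example code;
// encoding/json (already imported by this file) base64-encodes the []byte chain
// elements as the add-chain method expects.
func exampleAddChainBody(leafDER, issuerDER []byte) ([]byte, error) {
	req := AddChainRequest{Chain: [][]byte{leafDER, issuerDER}}
	return json.Marshal(req)
}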
// AddJSONRequest represents the JSON request body sent to the add-json POST method.
// The corresponding response re-uses AddChainResponse.
// This is an experimental addition not covered by RFC6962.
type AddJSONRequest struct {
Data interface{} `json:"data"`
}
// GetSTHResponse represents the JSON response to the get-sth GET method from section 4.3.
type GetSTHResponse struct {
TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree
Timestamp uint64 `json:"timestamp"` // Time that the tree was created
SHA256RootHash []byte `json:"sha256_root_hash"` // Root hash of the tree
TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH
}
// ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse.
func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) {
sth := SignedTreeHead{
TreeSize: r.TreeSize,
Timestamp: r.Timestamp,
}
if len(r.SHA256RootHash) != sha256.Size {
return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash))
}
copy(sth.SHA256RootHash[:], r.SHA256RootHash)
var ds DigitallySigned
if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil {
return nil, fmt.Errorf("tls.Unmarshal(): %s", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
}
sth.TreeHeadSignature = ds
return &sth, nil
}
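// Illustrative sketch (not part of the original file): decoding a raw get-sth
// response body and converting it with ToSignedTreeHead above. Hypothetical
// example code for a _test.go file of this package.
func exampleParseSTH(body []byte) (*SignedTreeHead, error) {
	var resp GetSTHResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		return nil, fmt.Errorf("invalid get-sth response: %v", err)
	}
	return resp.ToSignedTreeHead()
}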
// GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency
// GET method from section 4.4. (The corresponding GET request has parameters 'first' and
// 'second'.)
type GetSTHConsistencyResponse struct {
Consistency [][]byte `json:"consistency"`
}
// GetProofByHashResponse represents the JSON response to the get-proof-by-hash GET
// method from section 4.5. (The corresponding GET request has parameters 'hash'
// and 'tree_size'.)
type GetProofByHashResponse struct {
LeafIndex int64 `json:"leaf_index"` // The 0-based index of the end entity corresponding to the "hash" parameter.
AuditPath [][]byte `json:"audit_path"` // An array of base64-encoded Merkle Tree nodes proving the inclusion of the chosen certificate.
}
// LeafEntry represents a leaf in the Log's Merkle tree, as returned by the get-entries
// GET method from section 4.6.
type LeafEntry struct {
// LeafInput is a TLS-encoded MerkleTreeLeaf
LeafInput []byte `json:"leaf_input"`
// ExtraData holds (unsigned) extra data, normally the cert validation chain.
ExtraData []byte `json:"extra_data"`
}
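// Illustrative sketch (not part of the original file): deserializing the
// TLS-encoded leaf_input of a get-entries entry into a MerkleTreeLeaf, using the
// tls subpackage already imported by this file. Hypothetical example code.
func exampleParseLeafInput(entry LeafEntry) (*MerkleTreeLeaf, error) {
	var leaf MerkleTreeLeaf
	if rest, err := tls.Unmarshal(entry.LeafInput, &leaf); err != nil {
		return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err)
	} else if len(rest) > 0 {
		return nil, fmt.Errorf("trailing data (%d bytes) after MerkleTreeLeaf", len(rest))
	}
	return &leaf, nil
}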
// GetEntriesResponse represents the JSON response to the get-entries GET method
// from section 4.6.
type GetEntriesResponse struct {
Entries []LeafEntry `json:"entries"` // the list of returned entries
}
// GetRootsResponse represents the JSON response to the get-roots GET method from section 4.7.
type GetRootsResponse struct {
Certificates []string `json:"certificates"`
}
// GetEntryAndProofResponse represents the JSON response to the get-entry-and-proof
// GET method from section 4.8. (The corresponding GET request has parameters 'leaf_index'
// and 'tree_size'.)
type GetEntryAndProofResponse struct {
LeafInput []byte `json:"leaf_input"` // the entry itself
ExtraData []byte `json:"extra_data"` // any chain provided when the entry was added to the log
AuditPath [][]byte `json:"audit_path"` // the corresponding proof
}

View File

@ -1,82 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"cert_pool.go",
"curves.go",
"error.go",
"errors.go",
"names.go",
"nilref_nil_darwin.go",
"nilref_zero_darwin.go",
"pem_decrypt.go",
"pkcs1.go",
"pkcs8.go",
"ptr_sysptr_windows.go",
"ptr_uint_windows.go",
"revoked.go",
"root.go",
"root_bsd.go",
"root_cgo_darwin.go",
"root_darwin.go",
"root_darwin_armx.go",
"root_linux.go",
"root_nacl.go",
"root_nocgo_darwin.go",
"root_plan9.go",
"root_solaris.go",
"root_unix.go",
"root_windows.go",
"rpki.go",
"sec1.go",
"verify.go",
"x509.go",
],
cgo = True,
clinkopts = select({
"@io_bazel_rules_go//go/platform:darwin_386": [
"-framework CoreFoundation -framework Security",
],
"@io_bazel_rules_go//go/platform:darwin_amd64": [
"-framework CoreFoundation -framework Security",
],
"//conditions:default": [],
}),
copts = select({
"@io_bazel_rules_go//go/platform:darwin_386": [
"-mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080",
],
"@io_bazel_rules_go//go/platform:darwin_amd64": [
"-mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080",
],
"//conditions:default": [],
}),
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/x509",
importpath = "github.com/google/certificate-transparency-go/x509",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/google/certificate-transparency-go/asn1:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/tls:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509/pkix:go_default_library",
"//vendor/golang.org/x/crypto/cryptobyte:go_default_library",
"//vendor/golang.org/x/crypto/cryptobyte/asn1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/google/certificate-transparency-go/x509/pkix:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,143 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"encoding/pem"
"errors"
"runtime"
)
// CertPool is a set of certificates.
type CertPool struct {
bySubjectKeyId map[string][]int
byName map[string][]int
certs []*Certificate
}
// NewCertPool returns a new, empty CertPool.
func NewCertPool() *CertPool {
return &CertPool{
bySubjectKeyId: make(map[string][]int),
byName: make(map[string][]int),
}
}
// SystemCertPool returns a copy of the system cert pool.
//
// Any mutations to the returned pool are not written to disk and do
// not affect any other pool.
func SystemCertPool() (*CertPool, error) {
if runtime.GOOS == "windows" {
// Issue 16736, 18609:
return nil, errors.New("crypto/x509: system root pool is not available on Windows")
}
return loadSystemRoots()
}
// findVerifiedParents attempts to find certificates in s which have signed the
// given certificate. If any candidates were rejected then errCert will be set
// to one of them, arbitrarily, and err will contain the reason that it was
// rejected.
func (s *CertPool) findVerifiedParents(cert *Certificate) (parents []int, errCert *Certificate, err error) {
if s == nil {
return
}
var candidates []int
if len(cert.AuthorityKeyId) > 0 {
candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)]
}
if len(candidates) == 0 {
candidates = s.byName[string(cert.RawIssuer)]
}
for _, c := range candidates {
if err = cert.CheckSignatureFrom(s.certs[c]); err == nil {
parents = append(parents, c)
} else {
errCert = s.certs[c]
}
}
return
}
func (s *CertPool) contains(cert *Certificate) bool {
if s == nil {
return false
}
candidates := s.byName[string(cert.RawSubject)]
for _, c := range candidates {
if s.certs[c].Equal(cert) {
return true
}
}
return false
}
// AddCert adds a certificate to a pool.
func (s *CertPool) AddCert(cert *Certificate) {
if cert == nil {
panic("adding nil Certificate to CertPool")
}
// Check that the certificate isn't being added twice.
if s.contains(cert) {
return
}
n := len(s.certs)
s.certs = append(s.certs, cert)
if len(cert.SubjectKeyId) > 0 {
keyId := string(cert.SubjectKeyId)
s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n)
}
name := string(cert.RawSubject)
s.byName[name] = append(s.byName[name], n)
}
// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates.
// It appends any certificates found to s and reports whether any certificates
// were successfully parsed.
//
// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set
// of root CAs in a format suitable for this function.
func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
for len(pemCerts) > 0 {
var block *pem.Block
block, pemCerts = pem.Decode(pemCerts)
if block == nil {
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := ParseCertificate(block.Bytes)
if IsFatal(err) {
continue
}
s.AddCert(cert)
ok = true
}
return
}
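// Illustrative sketch (not part of the original file): building a pool from a
// PEM bundle with the functions above. Hypothetical example code for a _test.go
// file of this package; the PEM bytes would come from a root-CA bundle on disk.
func examplePoolFromPEM(pemBundle []byte) (*CertPool, error) {
	pool := NewCertPool()
	if !pool.AppendCertsFromPEM(pemBundle) {
		return nil, errors.New("no certificates parsed from PEM bundle")
	}
	return pool, nil
}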
// Subjects returns a list of the DER-encoded subjects of
// all of the certificates in the pool.
func (s *CertPool) Subjects() [][]byte {
res := make([][]byte, len(s.certs))
for i, c := range s.certs {
res[i] = c.RawSubject
}
return res
}

View File

@ -1,37 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"crypto/elliptic"
"math/big"
"sync"
)
// This file holds ECC curves that are not supported by the main Go crypto/elliptic
// library, but which have been observed in certificates in the wild.
var initonce sync.Once
var p192r1 *elliptic.CurveParams
func initAllCurves() {
initSECP192R1()
}
func initSECP192R1() {
// See SEC-2, section 2.2.2
p192r1 = &elliptic.CurveParams{Name: "P-192"}
p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16)
p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16)
p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16)
p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16)
p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16)
p192r1.BitSize = 192
}
func secp192r1() elliptic.Curve {
initonce.Do(initAllCurves)
return p192r1
}
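// Illustrative sketch (not part of the original file): using the P-192 curve
// defined above, e.g. to confirm that its base point lies on the curve.
// Hypothetical example code for a _test.go file of this package; assumes "fmt"
// is imported there.
func exampleSECP192R1() {
	c := secp192r1()
	fmt.Println(c.Params().Name, c.IsOnCurve(c.Params().Gx, c.Params().Gy)) // P-192 true
}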

View File

@ -1,230 +0,0 @@
package x509
import (
"bytes"
"fmt"
"strconv"
"strings"
)
// Error implements the error interface and describes a single error in an X.509 certificate or CRL.
type Error struct {
ID ErrorID
Category ErrCategory
Summary string
Field string
SpecRef string
SpecText string
// Fatal indicates that parsing has been aborted.
Fatal bool
}
func (err Error) Error() string {
var msg bytes.Buffer
if err.ID != ErrInvalidID {
if err.Fatal {
msg.WriteRune('E')
} else {
msg.WriteRune('W')
}
msg.WriteString(fmt.Sprintf("%03d: ", err.ID))
}
msg.WriteString(err.Summary)
return msg.String()
}
// VerboseError creates a more verbose error string, including spec details.
func (err Error) VerboseError() string {
var msg bytes.Buffer
msg.WriteString(err.Error())
if len(err.Field) > 0 || err.Category != UnknownCategory || len(err.SpecRef) > 0 || len(err.SpecText) > 0 {
msg.WriteString(" (")
needSep := false
if len(err.Field) > 0 {
msg.WriteString(err.Field)
needSep = true
}
if err.Category != UnknownCategory {
if needSep {
msg.WriteString(": ")
}
msg.WriteString(err.Category.String())
needSep = true
}
if len(err.SpecRef) > 0 {
if needSep {
msg.WriteString(": ")
}
msg.WriteString(err.SpecRef)
needSep = true
}
if len(err.SpecText) > 0 {
if needSep {
if len(err.SpecRef) > 0 {
msg.WriteString(", ")
} else {
msg.WriteString(": ")
}
}
msg.WriteString("'")
msg.WriteString(err.SpecText)
msg.WriteString("'")
}
msg.WriteString(")")
}
return msg.String()
}
// ErrCategory indicates the category of an x509.Error.
type ErrCategory int
// ErrCategory values.
const (
UnknownCategory ErrCategory = iota
// Errors in ASN.1 encoding
InvalidASN1Encoding
InvalidASN1Content
InvalidASN1DER
// Errors in ASN.1 relative to schema
InvalidValueRange
InvalidASN1Type
UnexpectedAdditionalData
// Errors in X.509
PoorlyFormedCertificate // Fails a SHOULD clause
MalformedCertificate // Fails a MUST clause
PoorlyFormedCRL // Fails a SHOULD clause
MalformedCRL // Fails a MUST clause
// Errors relative to CA/Browser Forum guidelines
BaselineRequirementsFailure
EVRequirementsFailure
// Other errors
InsecureAlgorithm
UnrecognizedValue
)
func (category ErrCategory) String() string {
switch category {
case InvalidASN1Encoding:
return "Invalid ASN.1 encoding"
case InvalidASN1Content:
return "Invalid ASN.1 content"
case InvalidASN1DER:
return "Invalid ASN.1 distinguished encoding"
case InvalidValueRange:
return "Invalid value for range given in schema"
case InvalidASN1Type:
return "Invalid ASN.1 type for schema"
case UnexpectedAdditionalData:
return "Unexpected additional data present"
case PoorlyFormedCertificate:
return "Certificate does not comply with SHOULD clause in spec"
case MalformedCertificate:
return "Certificate does not comply with MUST clause in spec"
case PoorlyFormedCRL:
return "Certificate Revocation List does not comply with SHOULD clause in spec"
case MalformedCRL:
return "Certificate Revocation List does not comply with MUST clause in spec"
case BaselineRequirementsFailure:
return "Certificate does not comply with CA/BF baseline requirements"
case EVRequirementsFailure:
return "Certificate does not comply with CA/BF EV requirements"
case InsecureAlgorithm:
return "Certificate uses an insecure algorithm"
case UnrecognizedValue:
return "Certificate uses an unrecognized value"
default:
return fmt.Sprintf("Unknown (%d)", category)
}
}
// ErrorID is an identifier for an x509.Error, to allow filtering.
type ErrorID int
// Errors implements the error interface and holds a collection of errors found in a certificate or CRL.
type Errors struct {
Errs []Error
}
// Error converts e to a single string, implementing the error interface.
func (e *Errors) Error() string {
return e.combineErrors(Error.Error)
}
// VerboseError creates a more verbose error string, including spec details.
func (e *Errors) VerboseError() string {
return e.combineErrors(Error.VerboseError)
}
// Fatal indicates whether e includes a fatal error
func (e *Errors) Fatal() bool {
return (e.FirstFatal() != nil)
}
// Empty indicates whether e has no errors.
func (e *Errors) Empty() bool {
return len(e.Errs) == 0
}
// FirstFatal returns the first fatal error in e, or nil
// if there is no fatal error.
func (e *Errors) FirstFatal() error {
for _, err := range e.Errs {
if err.Fatal {
return err
}
}
return nil
}
// AddID adds the Error identified by the given id to an x509.Errors.
func (e *Errors) AddID(id ErrorID, args ...interface{}) {
e.Errs = append(e.Errs, NewError(id, args...))
}
func (e Errors) combineErrors(errfn func(Error) string) string {
if len(e.Errs) == 0 {
return ""
}
if len(e.Errs) == 1 {
return errfn((e.Errs)[0])
}
var msg bytes.Buffer
msg.WriteString("Errors:")
for _, err := range e.Errs {
msg.WriteString("\n ")
msg.WriteString(errfn(err))
}
return msg.String()
}
// Filter creates a new Errors object with any entries from the filtered
// list of IDs removed.
func (e Errors) Filter(filtered []ErrorID) Errors {
var results Errors
eloop:
for _, v := range e.Errs {
for _, f := range filtered {
if v.ID == f {
continue eloop
}
}
results.Errs = append(results.Errs, v)
}
return results
}
// ErrorFilter builds a list of error IDs (suitable for use with Errors.Filter) from a comma-separated string.
func ErrorFilter(ignore string) []ErrorID {
var ids []ErrorID
filters := strings.Split(ignore, ",")
for _, f := range filters {
v, err := strconv.Atoi(f)
if err != nil {
continue
}
ids = append(ids, ErrorID(v))
}
return ids
}
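// Illustrative sketch (not part of the original file): suppressing selected
// error IDs from a parse result using ErrorFilter and Filter above. Hypothetical
// example code for a _test.go file of this package; assumes "fmt" is imported
// there and that the ignore list would normally come from a command-line flag.
func exampleFilterErrors(errs Errors) {
	ignore := ErrorFilter("3,4") // numeric ErrorID values to drop
	remaining := errs.Filter(ignore)
	if !remaining.Empty() {
		fmt.Println(remaining.Error())
	}
}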

View File

@ -1,302 +0,0 @@
package x509
import "fmt"
// To preserve error IDs, only append to this list, never insert.
const (
ErrInvalidID ErrorID = iota
ErrInvalidCertList
ErrTrailingCertList
ErrUnexpectedlyCriticalCertListExtension
ErrUnexpectedlyNonCriticalCertListExtension
ErrInvalidCertListAuthKeyID
ErrTrailingCertListAuthKeyID
ErrInvalidCertListIssuerAltName
ErrInvalidCertListCRLNumber
ErrTrailingCertListCRLNumber
ErrNegativeCertListCRLNumber
ErrInvalidCertListDeltaCRL
ErrTrailingCertListDeltaCRL
ErrNegativeCertListDeltaCRL
ErrInvalidCertListIssuingDP
ErrTrailingCertListIssuingDP
ErrCertListIssuingDPMultipleTypes
ErrCertListIssuingDPInvalidFullName
ErrInvalidCertListFreshestCRL
ErrInvalidCertListAuthInfoAccess
ErrTrailingCertListAuthInfoAccess
ErrUnhandledCriticalCertListExtension
ErrUnexpectedlyCriticalRevokedCertExtension
ErrUnexpectedlyNonCriticalRevokedCertExtension
ErrInvalidRevocationReason
ErrTrailingRevocationReason
ErrInvalidRevocationInvalidityDate
ErrTrailingRevocationInvalidityDate
ErrInvalidRevocationIssuer
ErrUnhandledCriticalRevokedCertExtension
ErrMaxID
)
// idToError gives a template x509.Error for each defined ErrorID; where the Summary
// field may hold format specifiers that take field parameters.
var idToError map[ErrorID]Error
var errorInfo = []Error{
{
ID: ErrInvalidCertList,
Summary: "x509: failed to parse CertificateList: %v",
Field: "CertificateList",
SpecRef: "RFC 5280 s5.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertList,
Summary: "x509: trailing data after CertificateList",
Field: "CertificateList",
SpecRef: "RFC 5280 s5.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrUnexpectedlyCriticalCertListExtension,
Summary: "x509: certificate list extension %v marked critical but expected to be non-critical",
Field: "tbsCertList.crlExtensions.*.critical",
SpecRef: "RFC 5280 s5.2",
Category: MalformedCRL,
},
{
ID: ErrUnexpectedlyNonCriticalCertListExtension,
Summary: "x509: certificate list extension %v marked non-critical but expected to be critical",
Field: "tbsCertList.crlExtensions.*.critical",
SpecRef: "RFC 5280 s5.2",
Category: MalformedCRL,
},
{
ID: ErrInvalidCertListAuthKeyID,
Summary: "x509: failed to unmarshal certificate-list authority key-id: %v",
Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier",
SpecRef: "RFC 5280 s5.2.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListAuthKeyID,
Summary: "x509: trailing data after certificate list auth key ID",
Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier",
SpecRef: "RFC 5280 s5.2.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidCertListIssuerAltName,
Summary: "x509: failed to parse CRL issuer alt name: %v",
Field: "tbsCertList.crlExtensions.*.IssuerAltName",
SpecRef: "RFC 5280 s5.2.2",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidCertListCRLNumber,
Summary: "x509: failed to unmarshal certificate-list crl-number: %v",
Field: "tbsCertList.crlExtensions.*.CRLNumber",
SpecRef: "RFC 5280 s5.2.3",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListCRLNumber,
Summary: "x509: trailing data after certificate list crl-number",
Field: "tbsCertList.crlExtensions.*.CRLNumber",
SpecRef: "RFC 5280 s5.2.3",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrNegativeCertListCRLNumber,
Summary: "x509: negative certificate list crl-number: %d",
Field: "tbsCertList.crlExtensions.*.CRLNumber",
SpecRef: "RFC 5280 s5.2.3",
Category: MalformedCRL,
Fatal: true,
},
{
ID: ErrInvalidCertListDeltaCRL,
Summary: "x509: failed to unmarshal certificate-list delta-crl: %v",
Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
SpecRef: "RFC 5280 s5.2.4",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListDeltaCRL,
Summary: "x509: trailing data after certificate list delta-crl",
Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
SpecRef: "RFC 5280 s5.2.4",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrNegativeCertListDeltaCRL,
Summary: "x509: negative certificate list base-crl-number: %d",
Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
SpecRef: "RFC 5280 s5.2.4",
Category: MalformedCRL,
Fatal: true,
},
{
ID: ErrInvalidCertListIssuingDP,
Summary: "x509: failed to unmarshal certificate list issuing distribution point: %v",
Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
SpecRef: "RFC 5280 s5.2.5",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListIssuingDP,
Summary: "x509: trailing data after certificate list issuing distribution point",
Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
SpecRef: "RFC 5280 s5.2.5",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrCertListIssuingDPMultipleTypes,
Summary: "x509: multiple cert types set in issuing-distribution-point: user:%v CA:%v attr:%v",
Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
SpecRef: "RFC 5280 s5.2.5",
SpecText: "at most one of onlyContainsUserCerts, onlyContainsCACerts, and onlyContainsAttributeCerts may be set to TRUE.",
Category: MalformedCRL,
Fatal: true,
},
{
ID: ErrCertListIssuingDPInvalidFullName,
Summary: "x509: failed to parse CRL issuing-distribution-point fullName: %v",
Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint.distributionPoint",
SpecRef: "RFC 5280 s5.2.5",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidCertListFreshestCRL,
Summary: "x509: failed to unmarshal certificate list freshestCRL: %v",
Field: "tbsCertList.crlExtensions.*.FreshestCRL",
SpecRef: "RFC 5280 s5.2.6",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidCertListAuthInfoAccess,
Summary: "x509: failed to unmarshal certificate list authority info access: %v",
Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess",
SpecRef: "RFC 5280 s5.2.7",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListAuthInfoAccess,
Summary: "x509: trailing data after certificate list authority info access",
Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess",
SpecRef: "RFC 5280 s5.2.7",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrUnhandledCriticalCertListExtension,
Summary: "x509: unhandled critical extension in certificate list: %v",
Field: "tbsCertList.revokedCertificates.crlExtensions.*",
SpecRef: "RFC 5280 s5.2",
SpecText: "If a CRL contains a critical extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of certificates.",
Category: MalformedCRL,
Fatal: true,
},
{
ID: ErrUnexpectedlyCriticalRevokedCertExtension,
Summary: "x509: revoked certificate extension %v marked critical but expected to be non-critical",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical",
SpecRef: "RFC 5280 s5.3",
Category: MalformedCRL,
},
{
ID: ErrUnexpectedlyNonCriticalRevokedCertExtension,
Summary: "x509: revoked certificate extension %v marked non-critical but expected to be critical",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical",
SpecRef: "RFC 5280 s5.3",
Category: MalformedCRL,
},
{
ID: ErrInvalidRevocationReason,
Summary: "x509: failed to parse revocation reason: %v",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason",
SpecRef: "RFC 5280 s5.3.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingRevocationReason,
Summary: "x509: trailing data after revoked certificate reason",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason",
SpecRef: "RFC 5280 s5.3.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidRevocationInvalidityDate,
Summary: "x509: failed to parse revoked certificate invalidity date: %v",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate",
SpecRef: "RFC 5280 s5.3.2",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingRevocationInvalidityDate,
Summary: "x509: trailing data after revoked certificate invalidity date",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate",
SpecRef: "RFC 5280 s5.3.2",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidRevocationIssuer,
Summary: "x509: failed to parse revocation issuer %v",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CertificateIssuer",
SpecRef: "RFC 5280 s5.3.3",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrUnhandledCriticalRevokedCertExtension,
Summary: "x509: unhandled critical extension in revoked certificate: %v",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*",
SpecRef: "RFC 5280 s5.3",
SpecText: "If a CRL contains a critical CRL entry extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of any certificates.",
Category: MalformedCRL,
Fatal: true,
},
}
func init() {
idToError = make(map[ErrorID]Error, len(errorInfo))
for _, info := range errorInfo {
idToError[info.ID] = info
}
}
// NewError builds a new x509.Error based on the template for the given id.
func NewError(id ErrorID, args ...interface{}) Error {
var err Error
if id >= ErrMaxID {
err.ID = id
err.Summary = fmt.Sprintf("Unknown error ID %v: args %+v", id, args)
err.Fatal = true
} else {
err = idToError[id]
err.Summary = fmt.Sprintf(err.Summary, args...)
}
return err
}
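// Illustrative sketch (not part of the original file): instantiating one of the
// templated errors above; any arguments fill the Summary's format verbs.
// Hypothetical example code for a _test.go file of this package; assumes "fmt"
// is imported there.
func exampleNewError() {
	err := NewError(ErrTrailingCertList)
	// Prints something like: true E002: x509: trailing data after CertificateList
	fmt.Println(err.Fatal, err.Error())
}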

View File

@ -1,164 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"fmt"
"net"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
const (
// GeneralName tag values from RFC 5280, 4.2.1.6
tagOtherName = 0
tagRFC822Name = 1
tagDNSName = 2
tagX400Address = 3
tagDirectoryName = 4
tagEDIPartyName = 5
tagURI = 6
tagIPAddress = 7
tagRegisteredID = 8
)
// OtherName describes a name related to a certificate which is not in one
// of the standard name formats. RFC 5280, 4.2.1.6:
// OtherName ::= SEQUENCE {
// type-id OBJECT IDENTIFIER,
// value [0] EXPLICIT ANY DEFINED BY type-id }
type OtherName struct {
TypeID asn1.ObjectIdentifier
Value asn1.RawValue
}
// GeneralNames holds a collection of names related to a certificate.
type GeneralNames struct {
DNSNames []string
EmailAddresses []string
DirectoryNames []pkix.Name
URIs []string
IPNets []net.IPNet
RegisteredIDs []asn1.ObjectIdentifier
OtherNames []OtherName
}
// Len returns the total number of names in a GeneralNames object.
func (gn GeneralNames) Len() int {
return (len(gn.DNSNames) + len(gn.EmailAddresses) + len(gn.DirectoryNames) +
len(gn.URIs) + len(gn.IPNets) + len(gn.RegisteredIDs) + len(gn.OtherNames))
}
// Empty indicates whether a GeneralNames object is empty.
func (gn GeneralNames) Empty() bool {
return gn.Len() == 0
}
func parseGeneralNames(value []byte, gname *GeneralNames) error {
// RFC 5280, 4.2.1.6
// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
//
// GeneralName ::= CHOICE {
// otherName [0] OtherName,
// rfc822Name [1] IA5String,
// dNSName [2] IA5String,
// x400Address [3] ORAddress,
// directoryName [4] Name,
// ediPartyName [5] EDIPartyName,
// uniformResourceIdentifier [6] IA5String,
// iPAddress [7] OCTET STRING,
// registeredID [8] OBJECT IDENTIFIER }
var seq asn1.RawValue
var rest []byte
if rest, err := asn1.Unmarshal(value, &seq); err != nil {
return fmt.Errorf("x509: failed to parse GeneralNames: %v", err)
} else if len(rest) != 0 {
return fmt.Errorf("x509: trailing data after GeneralNames")
}
if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal {
return fmt.Errorf("x509: failed to parse GeneralNames sequence, tag %+v", seq)
}
rest = seq.Bytes
for len(rest) > 0 {
var err error
rest, err = parseGeneralName(rest, gname, false)
if err != nil {
return fmt.Errorf("x509: failed to parse GeneralName: %v", err)
}
}
return nil
}
func parseGeneralName(data []byte, gname *GeneralNames, withMask bool) ([]byte, error) {
var v asn1.RawValue
var rest []byte
var err error
rest, err = asn1.Unmarshal(data, &v)
if err != nil {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames: %v", err)
}
switch v.Tag {
case tagOtherName:
if !v.IsCompound {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: not compound")
}
var other OtherName
v.FullBytes = append([]byte{}, v.FullBytes...)
v.FullBytes[0] = asn1.TagSequence | 0x20
_, err = asn1.Unmarshal(v.FullBytes, &other)
if err != nil {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: %v", err)
}
gname.OtherNames = append(gname.OtherNames, other)
case tagRFC822Name:
gname.EmailAddresses = append(gname.EmailAddresses, string(v.Bytes))
case tagDNSName:
dns := string(v.Bytes)
gname.DNSNames = append(gname.DNSNames, dns)
case tagDirectoryName:
var rdnSeq pkix.RDNSequence
if _, err := asn1.Unmarshal(v.Bytes, &rdnSeq); err != nil {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.directoryName: %v", err)
}
var dirName pkix.Name
dirName.FillFromRDNSequence(&rdnSeq)
gname.DirectoryNames = append(gname.DirectoryNames, dirName)
case tagURI:
gname.URIs = append(gname.URIs, string(v.Bytes))
case tagIPAddress:
vlen := len(v.Bytes)
if withMask {
switch vlen {
case (2 * net.IPv4len), (2 * net.IPv6len):
ipNet := net.IPNet{IP: v.Bytes[0 : vlen/2], Mask: v.Bytes[vlen/2:]}
gname.IPNets = append(gname.IPNets, ipNet)
default:
return nil, fmt.Errorf("x509: invalid IP/mask length %d in GeneralNames.iPAddress", vlen)
}
} else {
switch vlen {
case net.IPv4len, net.IPv6len:
ipNet := net.IPNet{IP: v.Bytes}
gname.IPNets = append(gname.IPNets, ipNet)
default:
return nil, fmt.Errorf("x509: invalid IP length %d in GeneralNames.iPAddress", vlen)
}
}
case tagRegisteredID:
var oid asn1.ObjectIdentifier
v.FullBytes = append([]byte{}, v.FullBytes...)
v.FullBytes[0] = asn1.TagOID
_, err = asn1.Unmarshal(v.FullBytes, &oid)
if err != nil {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.registeredID: %v", err)
}
gname.RegisteredIDs = append(gname.RegisteredIDs, oid)
default:
return nil, fmt.Errorf("x509: failed to unmarshal GeneralName: unknown tag %d", v.Tag)
}
return rest, nil
}

View File

@ -1,26 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build cgo,!arm,!arm64,!ios,!go1.10
package x509
/*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <CoreFoundation/CoreFoundation.h>
*/
import "C"
// For Go versions before 1.10, Apple's CoreFoundation CF*Ref types were
// mapped to Go pointer types, so a nil CF*Ref value is represented by nil. See:
// https://github.com/golang/go/commit/b868616b63a8
func setNilCFRef(v *C.CFDataRef) {
*v = nil
}
func isNilCFRef(v C.CFDataRef) bool {
return v == nil
}

View File

@ -1,26 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build cgo,!arm,!arm64,!ios,go1.10
package x509
/*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <CoreFoundation/CoreFoundation.h>
*/
import "C"
// For Go versions >= 1.10, Apple's CoreFoundation CF*Ref types are mapped
// to uintptr, so a nil CF*Ref value is represented by zero. See:
// https://github.com/golang/go/commit/b868616b63a8
func setNilCFRef(v *C.CFDataRef) {
*v = 0
}
func isNilCFRef(v C.CFDataRef) bool {
return v == 0
}

View File

@ -1,240 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
// RFC 1423 describes the encryption of PEM blocks. The algorithm used to
// generate a key from the password was derived by looking at the OpenSSL
// implementation.
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/md5"
"encoding/hex"
"encoding/pem"
"errors"
"io"
"strings"
)
type PEMCipher int
// Possible values for the EncryptPEMBlock encryption algorithm.
const (
_ PEMCipher = iota
PEMCipherDES
PEMCipher3DES
PEMCipherAES128
PEMCipherAES192
PEMCipherAES256
)
// rfc1423Algo holds a method for enciphering a PEM block.
type rfc1423Algo struct {
cipher PEMCipher
name string
cipherFunc func(key []byte) (cipher.Block, error)
keySize int
blockSize int
}
// rfc1423Algos holds a slice of the possible ways to encrypt a PEM
// block. The ivSize numbers were taken from the OpenSSL source.
var rfc1423Algos = []rfc1423Algo{{
cipher: PEMCipherDES,
name: "DES-CBC",
cipherFunc: des.NewCipher,
keySize: 8,
blockSize: des.BlockSize,
}, {
cipher: PEMCipher3DES,
name: "DES-EDE3-CBC",
cipherFunc: des.NewTripleDESCipher,
keySize: 24,
blockSize: des.BlockSize,
}, {
cipher: PEMCipherAES128,
name: "AES-128-CBC",
cipherFunc: aes.NewCipher,
keySize: 16,
blockSize: aes.BlockSize,
}, {
cipher: PEMCipherAES192,
name: "AES-192-CBC",
cipherFunc: aes.NewCipher,
keySize: 24,
blockSize: aes.BlockSize,
}, {
cipher: PEMCipherAES256,
name: "AES-256-CBC",
cipherFunc: aes.NewCipher,
keySize: 32,
blockSize: aes.BlockSize,
},
}
// deriveKey uses a key derivation function to stretch the password into a key
// with the number of bits our cipher requires. This algorithm was derived from
// the OpenSSL source.
func (c rfc1423Algo) deriveKey(password, salt []byte) []byte {
hash := md5.New()
out := make([]byte, c.keySize)
var digest []byte
for i := 0; i < len(out); i += len(digest) {
hash.Reset()
hash.Write(digest)
hash.Write(password)
hash.Write(salt)
digest = hash.Sum(digest[:0])
copy(out[i:], digest)
}
return out
}
// IsEncryptedPEMBlock reports whether the PEM block is password encrypted.
func IsEncryptedPEMBlock(b *pem.Block) bool {
_, ok := b.Headers["DEK-Info"]
return ok
}
// IncorrectPasswordError is returned when an incorrect password is detected.
var IncorrectPasswordError = errors.New("x509: decryption password incorrect")
// DecryptPEMBlock takes a password encrypted PEM block and the password used to
// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects
// the DEK-Info header to determine the algorithm used for decryption. If no
// DEK-Info header is present, an error is returned. If an incorrect password
// is detected an IncorrectPasswordError is returned. Because of deficiencies
// in the encrypted-PEM format, it's not always possible to detect an incorrect
// password. In these cases no error will be returned but the decrypted DER
// bytes will be random noise.
func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) {
dek, ok := b.Headers["DEK-Info"]
if !ok {
return nil, errors.New("x509: no DEK-Info header in block")
}
idx := strings.Index(dek, ",")
if idx == -1 {
return nil, errors.New("x509: malformed DEK-Info header")
}
mode, hexIV := dek[:idx], dek[idx+1:]
ciph := cipherByName(mode)
if ciph == nil {
return nil, errors.New("x509: unknown encryption mode")
}
iv, err := hex.DecodeString(hexIV)
if err != nil {
return nil, err
}
if len(iv) != ciph.blockSize {
return nil, errors.New("x509: incorrect IV size")
}
// Based on the OpenSSL implementation. The salt is the first 8 bytes
// of the initialization vector.
key := ciph.deriveKey(password, iv[:8])
block, err := ciph.cipherFunc(key)
if err != nil {
return nil, err
}
if len(b.Bytes)%block.BlockSize() != 0 {
return nil, errors.New("x509: encrypted PEM data is not a multiple of the block size")
}
data := make([]byte, len(b.Bytes))
dec := cipher.NewCBCDecrypter(block, iv)
dec.CryptBlocks(data, b.Bytes)
// Blocks are padded using a scheme where the last n bytes of padding are all
// equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423.
// For example:
// [x y z 2 2]
// [x y 7 7 7 7 7 7 7]
// If we detect a bad padding, we assume it is an invalid password.
dlen := len(data)
if dlen == 0 || dlen%ciph.blockSize != 0 {
return nil, errors.New("x509: invalid padding")
}
last := int(data[dlen-1])
if dlen < last {
return nil, IncorrectPasswordError
}
if last == 0 || last > ciph.blockSize {
return nil, IncorrectPasswordError
}
for _, val := range data[dlen-last:] {
if int(val) != last {
return nil, IncorrectPasswordError
}
}
return data[:dlen-last], nil
}
// EncryptPEMBlock returns a PEM block of the specified type holding the
// given DER-encoded data encrypted with the specified algorithm and
// password.
func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) {
ciph := cipherByKey(alg)
if ciph == nil {
return nil, errors.New("x509: unknown encryption mode")
}
iv := make([]byte, ciph.blockSize)
if _, err := io.ReadFull(rand, iv); err != nil {
return nil, errors.New("x509: cannot generate IV: " + err.Error())
}
// The salt is the first 8 bytes of the initialization vector,
// matching the key derivation in DecryptPEMBlock.
key := ciph.deriveKey(password, iv[:8])
block, err := ciph.cipherFunc(key)
if err != nil {
return nil, err
}
enc := cipher.NewCBCEncrypter(block, iv)
pad := ciph.blockSize - len(data)%ciph.blockSize
encrypted := make([]byte, len(data), len(data)+pad)
// We could save this copy by encrypting all the whole blocks in
// the data separately, but it doesn't seem worth the additional
// code.
copy(encrypted, data)
// See RFC 1423, section 1.1
for i := 0; i < pad; i++ {
encrypted = append(encrypted, byte(pad))
}
enc.CryptBlocks(encrypted, encrypted)
return &pem.Block{
Type: blockType,
Headers: map[string]string{
"Proc-Type": "4,ENCRYPTED",
"DEK-Info": ciph.name + "," + hex.EncodeToString(iv),
},
Bytes: encrypted,
}, nil
}
func cipherByName(name string) *rfc1423Algo {
for i := range rfc1423Algos {
alg := &rfc1423Algos[i]
if alg.name == name {
return alg
}
}
return nil
}
func cipherByKey(key PEMCipher) *rfc1423Algo {
for i := range rfc1423Algos {
alg := &rfc1423Algos[i]
if alg.cipher == key {
return alg
}
}
return nil
}
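// Illustrative sketch (not part of the original file): encrypting DER bytes into
// a password-protected PEM block with EncryptPEMBlock above and decrypting it
// again. Hypothetical example code for a _test.go file of this package; assumes
// "bytes" and "crypto/rand" are imported there, and the payload and password are
// placeholders.
func examplePEMRoundTrip() error {
	der := []byte("hypothetical DER payload")
	password := []byte("s3cret")
	block, err := EncryptPEMBlock(rand.Reader, "RSA PRIVATE KEY", der, password, PEMCipherAES256)
	if err != nil {
		return err
	}
	if !IsEncryptedPEMBlock(block) {
		return errors.New("expected an encrypted block")
	}
	plain, err := DecryptPEMBlock(block, password)
	if err != nil {
		return err
	}
	if !bytes.Equal(plain, der) {
		return errors.New("round trip mismatch")
	}
	return nil
}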

View File

@ -1,155 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"crypto/rsa"
"errors"
"math/big"
"github.com/google/certificate-transparency-go/asn1"
)
// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key.
type pkcs1PrivateKey struct {
Version int
N *big.Int
E int
D *big.Int
P *big.Int
Q *big.Int
// We ignore these values, if present, because rsa will calculate them.
Dp *big.Int `asn1:"optional"`
Dq *big.Int `asn1:"optional"`
Qinv *big.Int `asn1:"optional"`
AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"`
}
type pkcs1AdditionalRSAPrime struct {
Prime *big.Int
// We ignore these values because rsa will calculate them.
Exp *big.Int
Coeff *big.Int
}
// pkcs1PublicKey reflects the ASN.1 structure of a PKCS#1 public key.
type pkcs1PublicKey struct {
N *big.Int
E int
}
// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form.
func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) {
var priv pkcs1PrivateKey
rest, err := asn1.Unmarshal(der, &priv)
if len(rest) > 0 {
return nil, asn1.SyntaxError{Msg: "trailing data"}
}
if err != nil {
return nil, err
}
if priv.Version > 1 {
return nil, errors.New("x509: unsupported private key version")
}
if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 {
return nil, errors.New("x509: private key contains zero or negative value")
}
key := new(rsa.PrivateKey)
key.PublicKey = rsa.PublicKey{
E: priv.E,
N: priv.N,
}
key.D = priv.D
key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes))
key.Primes[0] = priv.P
key.Primes[1] = priv.Q
for i, a := range priv.AdditionalPrimes {
if a.Prime.Sign() <= 0 {
return nil, errors.New("x509: private key contains zero or negative prime")
}
key.Primes[i+2] = a.Prime
// We ignore the other two values because rsa will calculate
// them as needed.
}
err = key.Validate()
if err != nil {
return nil, err
}
key.Precompute()
return key, nil
}
// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form.
func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
key.Precompute()
version := 0
if len(key.Primes) > 2 {
version = 1
}
priv := pkcs1PrivateKey{
Version: version,
N: key.N,
E: key.PublicKey.E,
D: key.D,
P: key.Primes[0],
Q: key.Primes[1],
Dp: key.Precomputed.Dp,
Dq: key.Precomputed.Dq,
Qinv: key.Precomputed.Qinv,
}
priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues))
for i, values := range key.Precomputed.CRTValues {
priv.AdditionalPrimes[i].Prime = key.Primes[2+i]
priv.AdditionalPrimes[i].Exp = values.Exp
priv.AdditionalPrimes[i].Coeff = values.Coeff
}
b, _ := asn1.Marshal(priv)
return b
}
// ParsePKCS1PublicKey parses a PKCS#1 public key in ASN.1 DER form.
func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) {
var pub pkcs1PublicKey
rest, err := asn1.Unmarshal(der, &pub)
if err != nil {
return nil, err
}
if len(rest) > 0 {
return nil, asn1.SyntaxError{Msg: "trailing data"}
}
if pub.N.Sign() <= 0 || pub.E <= 0 {
return nil, errors.New("x509: public key contains zero or negative value")
}
if pub.E > 1<<31-1 {
return nil, errors.New("x509: public key contains large public exponent")
}
return &rsa.PublicKey{
E: pub.E,
N: pub.N,
}, nil
}
// MarshalPKCS1PublicKey converts an RSA public key to PKCS#1, ASN.1 DER form.
func MarshalPKCS1PublicKey(key *rsa.PublicKey) []byte {
derBytes, _ := asn1.Marshal(pkcs1PublicKey{
N: key.N,
E: key.E,
})
return derBytes
}
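// Illustrative sketch (not part of the original file): round-tripping a freshly
// generated RSA key through the PKCS#1 helpers above. Hypothetical example code
// for a _test.go file of this package; assumes "crypto/rand" is imported there.
func examplePKCS1RoundTrip() error {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}
	der := MarshalPKCS1PrivateKey(key)
	parsed, err := ParsePKCS1PrivateKey(der)
	if err != nil {
		return err
	}
	if parsed.N.Cmp(key.N) != 0 {
		return errors.New("modulus mismatch after round trip")
	}
	return nil
}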

View File

@ -1,102 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"crypto/ecdsa"
"crypto/rsa"
"errors"
"fmt"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See
// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn
// and RFC 5208.
type pkcs8 struct {
Version int
Algo pkix.AlgorithmIdentifier
PrivateKey []byte
// optional attributes omitted.
}
// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key.
// See RFC 5208.
func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
var privKey pkcs8
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
return nil, err
}
switch {
case privKey.Algo.Algorithm.Equal(OIDPublicKeyRSA):
key, err = ParsePKCS1PrivateKey(privKey.PrivateKey)
if err != nil {
return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error())
}
return key, nil
case privKey.Algo.Algorithm.Equal(OIDPublicKeyECDSA):
bytes := privKey.Algo.Parameters.FullBytes
namedCurveOID := new(asn1.ObjectIdentifier)
if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil {
namedCurveOID = nil
}
key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey)
if err != nil {
return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error())
}
return key, nil
default:
return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm)
}
}
// MarshalPKCS8PrivateKey converts a private key to PKCS#8 encoded form.
// The following key types are supported: *rsa.PrivateKey, *ecdsa.PrivateKey.
// Unsupported key types result in an error.
//
// See RFC 5208.
func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) {
var privKey pkcs8
switch k := key.(type) {
case *rsa.PrivateKey:
privKey.Algo = pkix.AlgorithmIdentifier{
Algorithm: OIDPublicKeyRSA,
Parameters: asn1.NullRawValue,
}
privKey.PrivateKey = MarshalPKCS1PrivateKey(k)
case *ecdsa.PrivateKey:
oid, ok := OIDFromNamedCurve(k.Curve)
if !ok {
return nil, errors.New("x509: unknown curve while marshalling to PKCS#8")
}
oidBytes, err := asn1.Marshal(oid)
if err != nil {
return nil, errors.New("x509: failed to marshal curve OID: " + err.Error())
}
privKey.Algo = pkix.AlgorithmIdentifier{
Algorithm: OIDPublicKeyECDSA,
Parameters: asn1.RawValue{
FullBytes: oidBytes,
},
}
if privKey.PrivateKey, err = marshalECPrivateKeyWithOID(k, nil); err != nil {
return nil, errors.New("x509: failed to marshal EC private key while building PKCS#8: " + err.Error())
}
default:
return nil, fmt.Errorf("x509: unknown key type while marshalling PKCS#8: %T", key)
}
return asn1.Marshal(privKey)
}
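// Illustrative sketch (not part of the original file): wrapping a freshly
// generated ECDSA key in PKCS#8 with the functions above and parsing it back.
// Hypothetical example code for a _test.go file of this package; assumes
// "crypto/elliptic" and "crypto/rand" are imported there.
func examplePKCS8RoundTrip() error {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}
	der, err := MarshalPKCS8PrivateKey(key)
	if err != nil {
		return err
	}
	parsed, err := ParsePKCS8PrivateKey(der)
	if err != nil {
		return err
	}
	if _, ok := parsed.(*ecdsa.PrivateKey); !ok {
		return fmt.Errorf("unexpected key type %T", parsed)
	}
	return nil
}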

View File

@ -1,24 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pkix.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/x509/pkix",
importpath = "github.com/google/certificate-transparency-go/x509/pkix",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/google/certificate-transparency-go/asn1:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,288 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pkix contains shared, low level structures used for ASN.1 parsing
// and serialization of X.509 certificates, CRL and OCSP.
package pkix
import (
// START CT CHANGES
"encoding/hex"
"fmt"
"github.com/google/certificate-transparency-go/asn1"
// END CT CHANGES
"math/big"
"time"
)
// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
// 5280, section 4.1.1.2.
type AlgorithmIdentifier struct {
Algorithm asn1.ObjectIdentifier
Parameters asn1.RawValue `asn1:"optional"`
}
type RDNSequence []RelativeDistinguishedNameSET
var attributeTypeNames = map[string]string{
"2.5.4.6": "C",
"2.5.4.10": "O",
"2.5.4.11": "OU",
"2.5.4.3": "CN",
"2.5.4.5": "SERIALNUMBER",
"2.5.4.7": "L",
"2.5.4.8": "ST",
"2.5.4.9": "STREET",
"2.5.4.17": "POSTALCODE",
}
// String returns a string representation of the sequence r,
// roughly following the RFC 2253 Distinguished Names syntax.
func (r RDNSequence) String() string {
s := ""
for i := 0; i < len(r); i++ {
rdn := r[len(r)-1-i]
if i > 0 {
s += ","
}
for j, tv := range rdn {
if j > 0 {
s += "+"
}
oidString := tv.Type.String()
typeName, ok := attributeTypeNames[oidString]
if !ok {
derBytes, err := asn1.Marshal(tv.Value)
if err == nil {
s += oidString + "=#" + hex.EncodeToString(derBytes)
continue // No value escaping necessary.
}
typeName = oidString
}
valueString := fmt.Sprint(tv.Value)
escaped := make([]rune, 0, len(valueString))
for k, c := range valueString {
escape := false
switch c {
case ',', '+', '"', '\\', '<', '>', ';':
escape = true
case ' ':
escape = k == 0 || k == len(valueString)-1
case '#':
escape = k == 0
}
if escape {
escaped = append(escaped, '\\', c)
} else {
escaped = append(escaped, c)
}
}
s += typeName + "=" + string(escaped)
}
}
return s
}
type RelativeDistinguishedNameSET []AttributeTypeAndValue
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
// http://tools.ietf.org/html/rfc5280#section-4.1.2.4
type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier
Value interface{}
}
// AttributeTypeAndValueSET represents a set of ASN.1 sequences of
// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10).
type AttributeTypeAndValueSET struct {
Type asn1.ObjectIdentifier
Value [][]AttributeTypeAndValue `asn1:"set"`
}
// Extension represents the ASN.1 structure of the same name. See RFC
// 5280, section 4.2.
type Extension struct {
Id asn1.ObjectIdentifier
Critical bool `asn1:"optional"`
Value []byte
}
// Name represents an X.509 distinguished name. This only includes the common
// elements of a DN. When parsing, all elements are stored in Names and
// non-standard elements can be extracted from there. When marshaling, elements
// in ExtraNames are appended and override other values with the same OID.
type Name struct {
Country, Organization, OrganizationalUnit []string
Locality, Province []string
StreetAddress, PostalCode []string
SerialNumber, CommonName string
Names []AttributeTypeAndValue
ExtraNames []AttributeTypeAndValue
}
func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
for _, rdn := range *rdns {
if len(rdn) == 0 {
continue
}
for _, atv := range rdn {
n.Names = append(n.Names, atv)
value, ok := atv.Value.(string)
if !ok {
continue
}
t := atv.Type
if len(t) == 4 && t[0] == OIDAttribute[0] && t[1] == OIDAttribute[1] && t[2] == OIDAttribute[2] {
switch t[3] {
case OIDCommonName[3]:
n.CommonName = value
case OIDSerialNumber[3]:
n.SerialNumber = value
case OIDCountry[3]:
n.Country = append(n.Country, value)
case OIDLocality[3]:
n.Locality = append(n.Locality, value)
case OIDProvince[3]:
n.Province = append(n.Province, value)
case OIDStreetAddress[3]:
n.StreetAddress = append(n.StreetAddress, value)
case OIDOrganization[3]:
n.Organization = append(n.Organization, value)
case OIDOrganizationalUnit[3]:
n.OrganizationalUnit = append(n.OrganizationalUnit, value)
case OIDPostalCode[3]:
n.PostalCode = append(n.PostalCode, value)
}
}
}
}
}
var (
OIDAttribute = asn1.ObjectIdentifier{2, 5, 4}
OIDCountry = asn1.ObjectIdentifier{2, 5, 4, 6}
OIDOrganization = asn1.ObjectIdentifier{2, 5, 4, 10}
OIDOrganizationalUnit = asn1.ObjectIdentifier{2, 5, 4, 11}
OIDCommonName = asn1.ObjectIdentifier{2, 5, 4, 3}
OIDSerialNumber = asn1.ObjectIdentifier{2, 5, 4, 5}
OIDLocality = asn1.ObjectIdentifier{2, 5, 4, 7}
OIDProvince = asn1.ObjectIdentifier{2, 5, 4, 8}
OIDStreetAddress = asn1.ObjectIdentifier{2, 5, 4, 9}
OIDPostalCode = asn1.ObjectIdentifier{2, 5, 4, 17}
OIDPseudonym = asn1.ObjectIdentifier{2, 5, 4, 65}
OIDTitle = asn1.ObjectIdentifier{2, 5, 4, 12}
OIDDnQualifier = asn1.ObjectIdentifier{2, 5, 4, 46}
OIDName = asn1.ObjectIdentifier{2, 5, 4, 41}
OIDSurname = asn1.ObjectIdentifier{2, 5, 4, 4}
OIDGivenName = asn1.ObjectIdentifier{2, 5, 4, 42}
OIDInitials = asn1.ObjectIdentifier{2, 5, 4, 43}
OIDGenerationQualifier = asn1.ObjectIdentifier{2, 5, 4, 44}
)
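// Illustrative sketch (not part of the upstream package): shows how
// FillFromRDNSequence copies recognized 2.5.4.* attributes into the typed
// fields of Name while recording every attribute in Names. The function name
// exampleFillFromRDNSequence is hypothetical.
func exampleFillFromRDNSequence() Name {
	seq := RDNSequence{
		{{Type: OIDCountry, Value: "DE"}},
		{{Type: OIDOrganization, Value: "Example GmbH"}},
		{{Type: OIDCommonName, Value: "ca.example.org"}},
	}
	var name Name
	name.FillFromRDNSequence(&seq)
	// name.Country == []string{"DE"}, name.Organization == []string{"Example GmbH"},
	// name.CommonName == "ca.example.org", and len(name.Names) == 3.
	return name
}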
// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
// and returns the new value. The relativeDistinguishedNameSET contains an
// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
// search for AttributeTypeAndValue.
func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) {
return in
}
s := make([]AttributeTypeAndValue, len(values))
for i, value := range values {
s[i].Type = oid
s[i].Value = value
}
return append(in, s)
}
func (n Name) ToRDNSequence() (ret RDNSequence) {
ret = n.appendRDNs(ret, n.Country, OIDCountry)
ret = n.appendRDNs(ret, n.Province, OIDProvince)
ret = n.appendRDNs(ret, n.Locality, OIDLocality)
ret = n.appendRDNs(ret, n.StreetAddress, OIDStreetAddress)
ret = n.appendRDNs(ret, n.PostalCode, OIDPostalCode)
ret = n.appendRDNs(ret, n.Organization, OIDOrganization)
ret = n.appendRDNs(ret, n.OrganizationalUnit, OIDOrganizationalUnit)
if len(n.CommonName) > 0 {
ret = n.appendRDNs(ret, []string{n.CommonName}, OIDCommonName)
}
if len(n.SerialNumber) > 0 {
ret = n.appendRDNs(ret, []string{n.SerialNumber}, OIDSerialNumber)
}
for _, atv := range n.ExtraNames {
ret = append(ret, []AttributeTypeAndValue{atv})
}
return ret
}
// String returns the string form of n, roughly following
// the RFC 2253 Distinguished Names syntax.
func (n Name) String() string {
return n.ToRDNSequence().String()
}
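// Illustrative sketch (not part of the upstream package): builds a Name from
// its typed fields, adds an ExtraNames entry, and renders it with String.
// Because ToRDNSequence skips any typed field whose OID already appears in
// ExtraNames, the ExtraNames organization below replaces the Organization
// field in the output. The function name exampleNameString is hypothetical.
func exampleNameString() string {
	n := Name{
		CommonName:   "client.example.net",
		Organization: []string{"Ignored Org"},
		Country:      []string{"GB"},
		ExtraNames: []AttributeTypeAndValue{
			{Type: OIDOrganization, Value: "Override Org"},
		},
	}
	// Yields: O=Override Org,CN=client.example.net,C=GB
	return n.String()
}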
// oidInAttributeTypeAndValue returns whether a type with the given OID exists
// in atv.
func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
for _, a := range atv {
if a.Type.Equal(oid) {
return true
}
}
return false
}
// CertificateList represents the ASN.1 structure of the same name. See RFC
// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
// signature.
type CertificateList struct {
TBSCertList TBSCertificateList
SignatureAlgorithm AlgorithmIdentifier
SignatureValue asn1.BitString
}
// HasExpired reports whether certList should have been updated by now.
func (certList *CertificateList) HasExpired(now time.Time) bool {
return !now.Before(certList.TBSCertList.NextUpdate)
}
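// Illustrative sketch (not part of the upstream package): demonstrates the
// HasExpired check, which treats a CRL as stale once the supplied time is at
// or past TBSCertList.NextUpdate. The function name exampleHasExpired is
// hypothetical.
func exampleHasExpired() bool {
	crl := &CertificateList{
		TBSCertList: TBSCertificateList{
			ThisUpdate: time.Now().Add(-48 * time.Hour),
			NextUpdate: time.Now().Add(-1 * time.Hour), // update was due an hour ago
		},
	}
	return crl.HasExpired(time.Now()) // true: NextUpdate is in the past
}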
// TBSCertificateList represents the ASN.1 structure TBSCertList. See RFC
// 5280, section 5.1.
type TBSCertificateList struct {
Raw asn1.RawContent
Version int `asn1:"optional,default:0"`
Signature AlgorithmIdentifier
Issuer RDNSequence
ThisUpdate time.Time
NextUpdate time.Time `asn1:"optional"`
RevokedCertificates []RevokedCertificate `asn1:"optional"`
Extensions []Extension `asn1:"tag:0,optional,explicit"`
}
// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
// revokedCertificates member of the TBSCertList structure. See RFC
// 5280, section 5.1.
type RevokedCertificate struct {
SerialNumber *big.Int
RevocationTime time.Time
Extensions []Extension `asn1:"optional"`
}
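// Illustrative sketch (not part of the upstream package): assembles a minimal
// TBSCertificateList with a single RevokedCertificate entry, the shape a CRL
// issuer would fill in before signing. The function name
// exampleRevocationList and the serial number are hypothetical.
func exampleRevocationList(issuer Name) TBSCertificateList {
	return TBSCertificateList{
		Version:    1, // a v2 CRL is encoded with the integer value 1 per RFC 5280
		Issuer:     issuer.ToRDNSequence(),
		ThisUpdate: time.Now(),
		NextUpdate: time.Now().Add(24 * time.Hour),
		RevokedCertificates: []RevokedCertificate{
			{
				SerialNumber:   big.NewInt(123456789),
				RevocationTime: time.Now(),
			},
		},
	}
}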
