Update module github.com/containers/image/v5 to v5.26.0

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
This commit is contained in:
renovate[bot]
2023-06-28 19:04:12 +00:00
committed by GitHub
parent bf7ae0a5d5
commit 1d5458fa7c
65 changed files with 604 additions and 9149 deletions

View File

@@ -1,25 +0,0 @@
*.iml
*.swo
*.swp
*.tfstate
*.tfstate.backup
*~
/.idea
/bazel-*
/commit_log
/coverage.txt
/createtree
/ct_hammer
/ct_server
/dump_tree
/licenses
/loglb
/maphammer
/mapreplay
/mdmtest
/protoc
/trillian_log_server
/trillian_log_signer
/trillian_map_server
default.etcd
cockroach-data/

View File

@@ -1,20 +0,0 @@
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
deadline: 90s
skip-files:
- types/internal/tls/tls.go
linters-settings:
gocyclo:
# minimal code complexity to report, 30 by default (but we recommend 10-20)
# TODO(mhutchinson): lower this again after reworking interceptor
min-complexity: 26
depguard:
list-type: blacklist
packages:
- golang.org/x/net/context
- github.com/gogo/protobuf/proto
issues:
# Don't turn off any checks by default. We can do this explicitly if needed.
exclude-use-default: false

View File

@@ -1,14 +0,0 @@
# This is the official list of benchmark authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.
Antonio Marcedone <a.marcedone@gmail.com>
Google LLC
Internet Security Research Group
Vishal Kuo <vishalkuo@gmail.com>

File diff suppressed because it is too large Load Diff

View File

@@ -1,21 +0,0 @@
# See https://help.github.com/articles/about-codeowners/
# for more info about CODEOWNERS file
# It uses the same pattern rule for gitignore file
# https://git-scm.com/docs/gitignore#_pattern_format
#
# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence,
# @google/trillian-team will be requested for
# review when someone opens a pull request.
* @google/trillian-team
/*.proto @mhutchinson @AlCutter @pphaneuf
/storage/storagepb/storage.proto @mhutchinson @AlCutter @pphaneuf
# Mitigation for https://github.com/google/trillian/issues/1297
# Folks to watch out for changes to DB schemas and ensure that
# there's a note added in a sensible location about how to
# upgrade schema instances.
/storage/mysql/schema/* @mhutchinson @AlCutter @pphaneuf
/storage/cloudspanner/spanner.sdl @mhutchinson @AlCutter @pphaneuf

View File

@@ -1,58 +0,0 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and setup a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request

View File

@@ -1,39 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
# Name <email address>
#
# Please keep the list sorted.
Al Cutter <al@google.com> <al@9600.org>
Alan Parra <alanparra@google.com>
Antonio Marcedone <a.marcedone@gmail.com>
Ben Laurie <benl@google.com> <ben@links.org>
David Drysdale <drysdale@google.com>
Gary Belvin <gbelvin@google.com>
Roland Shoemaker <roland@letsencrypt.org>
Martin Smith <mhs@google.com>
Martin Hutchinson <mhutchinson@google.com> <mhutchinson@gmail.com>
Paul Hadfield <hadfieldp@google.com> <paul@phad.org.uk>
Pavel Kalinnikov <pkalinnikov@google.com> <pavelkalinnikov@gmail.com>
Pierre Phaneuf <pphaneuf@google.com> <pphaneuf@gmail.com>
Rob Percival <robpercival@google.com>
Roger Ng <rogerng@google.com> <roger2hk@gmail.com>
Vishal Kuo <vishalkuo@gmail.com>

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,15 +0,0 @@
<!---
Describe your changes in detail here.
If this fixes an issue, please write "Fixes #123", substituting the issue number.
-->
### Checklist
<!---
Go over all the following points, and put an `x` in all the boxes that apply.
Feel free to not tick any boxes that don't apply to this PR (e.g. refactoring may not need a CHANGELOG update).
If you're unsure about any of these, don't hesitate to ask. We're here to help!
-->
- [ ] I have updated the [CHANGELOG](CHANGELOG.md).
- [ ] I have updated [documentation](docs/) accordingly (including the [feature implementation matrix](docs/Feature_Implementation_Matrix.md)).

View File

@@ -1,317 +0,0 @@
# Trillian: General Transparency
[![Go Report Card](https://goreportcard.com/badge/github.com/google/trillian)](https://goreportcard.com/report/github.com/google/trillian)
[![codecov](https://codecov.io/gh/google/trillian/branch/master/graph/badge.svg?token=QwofUwmvAs)](https://codecov.io/gh/google/trillian)
[![GoDoc](https://godoc.org/github.com/google/trillian?status.svg)](https://godoc.org/github.com/google/trillian)
[![Slack Status](https://img.shields.io/badge/Slack-Chat-blue.svg)](https://gtrillian.slack.com/)
- [Overview](#overview)
- [Support](#support)
- [Using the Code](#using-the-code)
- [MySQL Setup](#mysql-setup)
- [Integration Tests](#integration-tests)
- [Working on the Code](#working-on-the-code)
- [Rebuilding Generated Code](#rebuilding-generated-code)
- [Updating Dependencies](#updating-dependencies)
- [Running Codebase Checks](#running-codebase-checks)
- [Design](#design)
- [Design Overview](#design-overview)
- [Personalities](#personalities)
- [Log Mode](#log-mode)
- [Use Cases](#use-cases)
- [Certificate Transparency Log](#certificate-transparency-log)
## Overview
Trillian is an implementation of the concepts described in the
[Verifiable Data Structures](docs/papers/VerifiableDataStructures.pdf) white paper,
which in turn is an extension and generalisation of the ideas which underpin
[Certificate Transparency](https://certificate-transparency.org).
Trillian implements a [Merkle tree](https://en.wikipedia.org/wiki/Merkle_tree)
whose contents are served from a data storage layer, to allow scalability to
extremely large trees. On top of this Merkle tree, Trillian provides the
following:
- An append-only **Log** mode, analogous to the original
[Certificate Transparency](https://certificate-transparency.org) logs. In
this mode, the Merkle tree is effectively filled up from the left, giving a
*dense* Merkle tree.
Note that Trillian requires particular applications to provide their own
[personalities](#personalities) on top of the core transparent data store
functionality.
[Certificate Transparency (CT)](https://tools.ietf.org/html/rfc6962)
is the most well-known and widely deployed transparency application, and an implementation of CT as a Trillian personality is available in the
[certificate-transparency-go repo](https://github.com/google/certificate-transparency-go/blob/master/trillian).
Other examples of Trillian personalities are available in the
[trillian-examples](https://github.com/google/trillian-examples) repo.
## Support
- Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency
- Slack: https://gtrillian.slack.com/ ([invitation](https://join.slack.com/t/gtrillian/shared_invite/enQtNDM3NTE3NjA4NDcwLTMwYzVlMDUxMDQ2MGU5MjcyZGIxMmVmZGNlNzdhMzRlOGFjMWJkNzc0MGY1Y2QyNWQyMWM4NzJlOGMxNTZkZGU))
## Using the Code
The Trillian codebase is stable and is used in production by multiple
organizations, including many large-scale
[Certificate Transparency](https://certificate.transparency.dev) log
operators.
Given this, we do not plan to add any new features to this version of Trillian,
and will try to avoid any further incompatible code and schema changes but
cannot guarantee that they will never be necessary.
The current state of feature implementation is recorded in the
[Feature implementation matrix](docs/Feature_Implementation_Matrix.md).
To build and test Trillian you need:
- Go 1.19 or later (go 1.19 matches cloudbuild, and is preferred for developers
that will be submitting PRs to this project).
To run many of the tests (and production deployment) you need:
- [MySQL](https://www.mysql.com/) or [MariaDB](https://mariadb.org/) to provide
the data storage layer; see the [MySQL Setup](#mysql-setup) section.
Note that this repository uses Go modules to manage dependencies; Go will fetch
and install them automatically upon build/test.
To fetch the code, dependencies, and build Trillian, run the following:
```bash
git clone https://github.com/google/trillian.git
cd trillian
go build ./...
```
To build and run tests, use:
```bash
go test ./...
```
The repository also includes multi-process integration tests, described in the
[Integration Tests](#integration-tests) section below.
### MySQL Setup
To run Trillian's integration tests you need to have an instance of MySQL
running and configured to:
- listen on the standard MySQL port 3306 (so `mysql --host=127.0.0.1
--port=3306` connects OK)
- not require a password for the `root` user
You can then set up the [expected tables](storage/mysql/schema/storage.sql) in a
`test` database like so:
```bash
./scripts/resetdb.sh
Warning: about to destroy and reset database 'test'
Are you sure? y
> Resetting DB...
> Reset Complete
```
### Integration Tests
Trillian includes an integration test suite to confirm basic end-to-end
functionality, which can be run with:
```bash
./integration/integration_test.sh
```
This runs a multi-process test:
- A [test](integration/log_integration_test.go) that starts a Trillian server
in Log mode, together with a signer, logs many leaves, and checks they are
integrated correctly.
### Deployment
You can find instructions on how to deploy Trillian in [deployment](/deployment)
and [examples/deployment](/examples/deployment) directories.
## Working on the Code
Developers who want to make changes to the Trillian codebase need some
additional dependencies and tools, described in the following sections. The
[Cloud Build configuration](cloudbuild.yaml) and the scripts it depends on are
also a useful reference for the required tools and scripts, as it may be more
up-to-date than this document.
### Rebuilding Generated Code
Some of the Trillian Go code is autogenerated from other files:
- [gRPC](http://www.grpc.io/) message structures are originally provided as
[protocol buffer](https://developers.google.com/protocol-buffers/) message
definitions. See also, https://grpc.io/docs/protoc-installation/.
- Some unit tests use mock implementations of interfaces; these are created
from the real implementations by [GoMock](https://github.com/golang/mock).
- Some enums have string-conversion methods (satisfying the `fmt.Stringer`
interface) created using the
[stringer](https://godoc.org/golang.org/x/tools/cmd/stringer) tool (`go get
golang.org/x/tools/cmd/stringer`).
Re-generating mock or protobuffer files is only needed if you're changing
the original files; if you do, you'll need to install the prerequisites:
- a series of tools, using `go install` to ensure that the versions are
compatible and tested:
```
cd $(go list -f '{{ .Dir }}' github.com/google/trillian); \
go install github.com/golang/mock/mockgen; \
go install google.golang.org/protobuf/proto; \
go install google.golang.org/protobuf/cmd/protoc-gen-go; \
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc; \
go install github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc; \
go install golang.org/x/tools/cmd/stringer
```
and run the following:
```bash
go generate -x ./... # hunts for //go:generate comments and runs them
```
### Updating Dependencies
The Trillian codebase uses go.mod to declare fixed versions of its dependencies.
With Go modules, updating a dependency simply involves running `go get`:
```
go get package/path # Fetch the latest published version
go get package/path@X.Y.Z # Fetch a specific published version
go get package/path@HEAD # Fetch the latest commit
```
To update ALL dependencies to the latest version run `go get -u`.
Be warned however, that this may undo any selected versions that resolve issues in other non-module repos.
While running `go build` and `go test`, go will add any ambiguous transitive dependencies to `go.mod`
To clean these up run:
```
go mod tidy
```
### Running Codebase Checks
The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
and tests over the codebase.
#### Install [golangci-lint](https://github.com/golangci/golangci-lint#local-installation).
```bash
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.1
```
#### Run code generation, build, test and linters
```bash
./scripts/presubmit.sh
```
#### Or just run the linters alone
```bash
golangci-lint run
```
## Design
### Design Overview
Trillian is primarily implemented as a
[gRPC service](http://www.grpc.io/docs/guides/concepts.html#service-definition);
this service receives get/set requests over gRPC and retrieves the corresponding
Merkle tree data from a separate storage layer (currently using MySQL), ensuring
that the cryptographic properties of the tree are preserved along the way.
The Trillian service is multi-tenanted — a single Trillian installation can
support multiple Merkle trees in parallel, distinguished by their `TreeId` and
each tree operates in one of two modes:
- **Log** mode: an append-only collection of items; this has two sub-modes:
- normal Log mode, where the Trillian service assigns sequence numbers to
new tree entries as they arrive
- 'preordered' Log mode, where the unique sequence number for entries in
the Merkle tree is externally specified
In either case, Trillian's key transparency property is that cryptographic
proofs of inclusion/consistency are available for data items added to the
service.
### Personalities
To build a complete transparent application, the Trillian core service needs
to be paired with additional code, known as a *personality*, that provides
functionality that is specific to the particular application.
In particular, the personality is responsible for:
* **Admission Criteria** ensuring that submissions comply with the
overall purpose of the application.
* **Canonicalization** ensuring that equivalent versions of the same
data get the same canonical identifier, so they can be de-duplicated by
the Trillian core service.
* **External Interface** providing an API for external users,
including any practical constraints (ACLs, load-balancing, DoS protection,
etc.)
This is
[described in more detail in a separate document](docs/Personalities.md).
General
[design considerations for transparent Log applications](docs/TransparentLogging.md)
are also discussed separately.
### Log Mode
When running in Log mode, Trillian provides a gRPC API whose operations are
similar to those available for Certificate Transparency logs
(cf. [RFC 6962](https://tools.ietf.org/html/rfc6962)). These include:
- `GetLatestSignedLogRoot` returns information about the current root of the
Merkle tree for the log, including the tree size, hash value, timestamp and
signature.
- `GetLeavesByRange` returns leaf information for particular leaves,
specified by their index in the log.
- `QueueLeaf` requests inclusion of the specified item into the log.
- For a pre-ordered log, `AddSequencedLeaves` requests the inclusion of
specified items into the log at specified places in the tree.
- `GetInclusionProof`, `GetInclusionProofByHash` and `GetConsistencyProof`
return inclusion and consistency proof data.
In Log mode (whether normal or pre-ordered), Trillian includes an additional
Signer component; this component periodically processes pending items and
adds them to the Merkle tree, creating a new signed tree head as a result.
![Log components](docs/images/LogDesign.png)
(Note that each of the components in this diagram can be
[distributed](https://github.com/google/certificate-transparency-go/blob/master/trillian/docs/ManualDeployment.md#distribution),
for scalability and resilience.)
Use Cases
---------
### Certificate Transparency Log
The most obvious application for Trillian in Log mode is to provide a
Certificate Transparency (RFC 6962) Log. To do this, the CT Log personality
needs to include all of the certificate-specific processing — in particular,
checking that an item that has been suggested for inclusion is indeed a valid
certificate that chains to an accepted root.

View File

@@ -1,186 +0,0 @@
# This file contains Google Cloud Build configuration for presubmit checks, unit
# and integration tests, triggered by pull requests and commits to branches.
timeout: 1800s
substitutions:
_CODECOV_TOKEN: "" # The auth token for uploading coverage to Codecov.
options:
machineType: E2_HIGHCPU_32
volumes:
# A shared volume for caching Go modules between steps.
- name: go-modules
path: /go
env:
- GOPATH=/go
- GOLANG_PROTOBUF_REGISTRATION_CONFLICT=ignore # Temporary work-around v1.proto already registered error.
- DOCKER_CLIENT_TIMEOUT=120
- COMPOSE_HTTP_TIMEOUT=120
# Cache the testbase image in Container Registry, to be reused by subsequent
# builds. The technique is described here:
# https://cloud.google.com/cloud-build/docs/speeding-up-builds#using_a_cached_docker_image
#
# TODO(pavelkalinnikov): Consider pushing this image only on commits to master.
images: ['gcr.io/$PROJECT_ID/trillian_testbase:latest']
# Cloud Build logs sent to GCS bucket
logsBucket: 'gs://trillian-cloudbuild-logs'
steps:
# Try to pull the testbase image from Container Registry.
- name: 'gcr.io/cloud-builders/docker'
entrypoint: 'bash'
args: ['-c', 'docker pull gcr.io/$PROJECT_ID/trillian_testbase:latest || exit 0']
# Build the testbase image reusing as much of the cached image as possible.
- name: 'gcr.io/cloud-builders/docker'
args: [
'build',
'-t', 'gcr.io/$PROJECT_ID/trillian_testbase:latest',
'--cache-from', 'gcr.io/$PROJECT_ID/trillian_testbase:latest',
'-f', './integration/cloudbuild/testbase/Dockerfile',
'.'
]
# Set up tools and any other common steps which should not be part of Docker image.
- id: prepare
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/prepare.sh
# Run lint and porcelain checks, make sure the diff is empty and no files need
# to be updated. This includes gofmt, golangci-linter, go mod tidy, go mod
# generate and a few more.
- id: lint
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./scripts/presubmit.sh
args:
- --no-build
- --fix
- --no-mod-tidy
- --empty-diff
waitFor:
- prepare
# Presubmit
- id: presubmit
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_presubmit.sh
args:
- --no-linters
- --no-generate
env:
- GOFLAGS=-race
- GO_TEST_TIMEOUT=20m
waitFor:
- lint
# Codecov
- id: codecov
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_presubmit.sh
args:
- --coverage
- --no-linters
- --no-generate
env:
- GOFLAGS=-race
- GO_TEST_TIMEOUT=20m
- CODECOV_TOKEN=${_CODECOV_TOKEN}
waitFor:
- lint
# Presubmit (Batched queue)
- id: presubmit_batched
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_presubmit.sh
args:
- --no-linters
- --no-generate
env:
- GOFLAGS=-race --tags=batched_queue
- GO_TEST_TIMEOUT=20m
waitFor:
- lint
# Presubmit (PKCS11)
- id: presubmit_pkcs11
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_presubmit.sh
args:
- --no-linters
- --no-generate
env:
- GOFLAGS=-race --tags=pkcs11
- GO_TEST_TIMEOUT=20m
waitFor:
- lint
# Try to spread the load a bit, we'll wait for all the presubmit.* steps
# to finish before starting the integration.* ones.
# Having too many "big" things running concurrently leads to problems
# with timeouts and mysql issues.
- id: presubmits_done
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: /bin/true
waitFor:
- codecov
- presubmit
- presubmit_batched
- presubmit_pkcs11
# Integration
- id: integration
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_integration.sh
env:
- GO_TEST_TIMEOUT=20m
waitFor:
- presubmits_done
# Integration (Docker)
- id: integration_docker
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/docker_compose_integration_test.sh
waitFor:
- presubmits_done
# Integration (etcd)
- id: integration_etcd
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_integration.sh
env:
- ETCD_DIR=/go/bin
- GOFLAGS=-race
- GO_TEST_TIMEOUT=20m
waitFor:
- presubmits_done
# Integration (Batched queue)
- id: integration_batched
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_integration.sh
env:
- GOFLAGS=-race -tags=batched_queue
- GO_TEST_TIMEOUT=20m
waitFor:
- presubmits_done
# Integration (PKCS11)
- id: integration_pkcs11
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_integration.sh
env:
- GOFLAGS=-race -tags=pkcs11
- GO_TEST_TIMEOUT=20m
waitFor:
- presubmits_done
# Integration (MariaDB)
- id: integration_mariadb
name: 'gcr.io/${PROJECT_ID}/trillian_testbase'
entrypoint: ./integration/cloudbuild/run_integration.sh
env:
- GO_TEST_TIMEOUT=20m
- MYSQLD_IMAGE=mariadb:10.3
waitFor:
- presubmits_done

View File

@@ -1,165 +0,0 @@
timeout: 1800s
substitutions:
_CLUSTER_NAME: trillian-opensource-ci
_MASTER_ZONE: us-central1-a
_MYSQL_TAG: "5.7"
_MYSQL_ROOT_PASSWORD: ""
_MYSQL_PASSWORD: ""
options:
machineType: E2_HIGHCPU_32
steps:
- id: pull_mysql
name : gcr.io/cloud-builders/docker
args:
- pull
- marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- id: tag_mysql
name: gcr.io/cloud-builders/docker
args:
- tag
- marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
waitFor:
- pull_mysql
- id: push_mysql
name: gcr.io/cloud-builders/docker
args:
- push
- gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
waitFor:
- tag_mysql
- id: build_db_server
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/db_server/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/db_server:${COMMIT_SHA}
- --destination=gcr.io/${PROJECT_ID}/db_server:latest
- --cache=true
- --cache-dir= # Cache is in Google Container Registry
waitFor:
- push_mysql
- id: build_log_server
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/log_server/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/log_server:${COMMIT_SHA}
- --destination=gcr.io/${PROJECT_ID}/log_server:latest
- --cache=true
- --cache-dir= # Cache is in Google Container Registry
waitFor: ["-"]
- id: build_log_signer
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/log_signer/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/log_signer:${COMMIT_SHA}
- --destination=gcr.io/${PROJECT_ID}/log_signer:latest
- --cache=true
- --cache-dir= # Cache is in Google Container Registry
waitFor: ["-"]
- id: build_envsubst
name: gcr.io/cloud-builders/docker
args:
- build
- examples/deployment/docker/envsubst
- -t
- envsubst
waitFor: ["-"]
# etcd-operator requires that a ClusterRole has been created for it already.
# Do this manually using examples/deployment/kubernetes/etcd-role*.yaml.
- id: apply_k8s_cfgs_for_clusterwide_etcd_operator
name: gcr.io/cloud-builders/kubectl
args:
- apply
- -f=examples/deployment/kubernetes/etcd-deployment.yaml
env:
- CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
waitFor: ["-"]
- id: copy_k8s_cfgs_for_spanner
name: busybox
entrypoint: cp
args:
- -r
- examples/deployment/kubernetes/
- envsubst-spanner/
waitFor: ['-']
- id: envsubst_k8s_cfgs_for_spanner
name: envsubst
args:
- envsubst-spanner/etcd-cluster.yaml
- envsubst-spanner/trillian-ci-spanner.yaml
- envsubst-spanner/trillian-log-deployment.yaml
- envsubst-spanner/trillian-log-service.yaml
- envsubst-spanner/trillian-log-signer-deployment.yaml
- envsubst-spanner/trillian-log-signer-service.yaml
env:
- PROJECT_ID=${PROJECT_ID}
- IMAGE_TAG=${COMMIT_SHA}
waitFor:
- build_envsubst
- copy_k8s_cfgs_for_spanner
- id: apply_k8s_cfgs_for_spanner
name: gcr.io/cloud-builders/kubectl
args:
- apply
- -f=envsubst-spanner/etcd-cluster.yaml
- -f=envsubst-spanner/trillian-ci-spanner.yaml
- -f=envsubst-spanner/trillian-log-deployment.yaml
- -f=envsubst-spanner/trillian-log-service.yaml
- -f=envsubst-spanner/trillian-log-signer-deployment.yaml
- -f=envsubst-spanner/trillian-log-signer-service.yaml
env:
- CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
waitFor:
- envsubst_k8s_cfgs_for_spanner
- build_log_server
- build_log_signer
- id: copy_k8s_cfgs_for_mysql
name: busybox
entrypoint: cp
args:
- -r
- examples/deployment/kubernetes/
- envsubst-mysql/
waitFor: ['-']
- id: envsubst_k8s_cfgs_for_mysql
name: envsubst
args:
- envsubst-mysql/etcd-cluster.yaml
- envsubst-mysql/trillian-ci-mysql.yaml
- envsubst-mysql/trillian-mysql.yaml
- envsubst-mysql/trillian-log-deployment.yaml
- envsubst-mysql/trillian-log-service.yaml
- envsubst-mysql/trillian-log-signer-deployment.yaml
- envsubst-mysql/trillian-log-signer-service.yaml
env:
- PROJECT_ID=${PROJECT_ID}
- IMAGE_TAG=${COMMIT_SHA}
- MYSQL_ROOT_PASSWORD=${_MYSQL_ROOT_PASSWORD}
- MYSQL_USER=trillian
- MYSQL_PASSWORD=${_MYSQL_PASSWORD}
- MYSQL_DATABASE=trillian
waitFor:
- build_envsubst
- copy_k8s_cfgs_for_mysql
- id: apply_k8s_cfgs_for_mysql
name: gcr.io/cloud-builders/kubectl
args:
- apply
- --namespace=mysql
- -f=envsubst-mysql/etcd-cluster.yaml
- -f=envsubst-mysql/trillian-ci-mysql.yaml
- -f=envsubst-mysql/trillian-mysql.yaml
- -f=envsubst-mysql/trillian-log-deployment.yaml
- -f=envsubst-mysql/trillian-log-service.yaml
- -f=envsubst-mysql/trillian-log-signer-deployment.yaml
- -f=envsubst-mysql/trillian-log-signer-service.yaml
env:
- CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
waitFor:
- envsubst_k8s_cfgs_for_mysql
- build_db_server
- build_log_server
- build_log_signer

View File

@@ -1,175 +0,0 @@
# This file contains configuration for Cloud Builds triggered by pull requests
# to this repository.
timeout: 1800s
substitutions:
_CLUSTER_NAME: trillian-opensource-ci
_MASTER_ZONE: us-central1-a
_MYSQL_TAG: "5.7"
_MYSQL_ROOT_PASSWORD: ""
_MYSQL_PASSWORD: ""
options:
machineType: E2_HIGHCPU_32
steps:
- id: pull_mysql
name : gcr.io/cloud-builders/docker
args:
- pull
- marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- id: tag_mysql
name: gcr.io/cloud-builders/docker
args:
- tag
- marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
waitFor:
- pull_mysql
- id: push_mysql
name: gcr.io/cloud-builders/docker
args:
- push
- gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
waitFor:
- tag_mysql
- id: build_db_server
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/db_server/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/db_server:${COMMIT_SHA}
- --cache=true
- --cache-dir= # Cache is in Google Container Registry.
waitFor:
- push_mysql
- id: build_log_server
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/log_server/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/log_server:${COMMIT_SHA}
- --cache=true
- --cache-dir= # Cache is in Google Container Registry
waitFor: ['-']
- id: build_log_signer
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/log_signer/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/log_signer:${COMMIT_SHA}
- --cache=true
- --cache-dir= # Cache is in Google Container Registry
waitFor: ['-']
- id: build_envsubst
name: gcr.io/cloud-builders/docker
args:
- build
- examples/deployment/docker/envsubst
- -t
- envsubst
waitFor: ["-"]
- id: apply_k8s_cfgs_for_clusterwide_etcd_operator_dryrun
name: gcr.io/cloud-builders/kubectl
args:
- apply
- --dry-run=server
- -f=examples/deployment/kubernetes/etcd-deployment.yaml
env:
- CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
waitFor: ['-']
- id: copy_k8s_cfgs_for_spanner
name: busybox
entrypoint: cp
args:
- -r
- examples/deployment/kubernetes/
- envsubst-spanner/
waitFor: ['-']
- id: envsubst_k8s_cfgs_for_spanner
name: envsubst
args:
- envsubst-spanner/etcd-cluster.yaml
- envsubst-spanner/trillian-ci-spanner.yaml
- envsubst-spanner/trillian-log-deployment.yaml
- envsubst-spanner/trillian-log-service.yaml
- envsubst-spanner/trillian-log-signer-deployment.yaml
- envsubst-spanner/trillian-log-signer-service.yaml
env:
- PROJECT_ID=${PROJECT_ID}
- IMAGE_TAG=${COMMIT_SHA}
waitFor:
- build_envsubst
- copy_k8s_cfgs_for_spanner
- id: apply_k8s_cfgs_for_spanner_dryrun
name: gcr.io/cloud-builders/kubectl
args:
- apply
- --dry-run=server
- -f=envsubst-spanner/etcd-cluster.yaml
- -f=envsubst-spanner/trillian-ci-spanner.yaml
- -f=envsubst-spanner/trillian-log-deployment.yaml
- -f=envsubst-spanner/trillian-log-service.yaml
- -f=envsubst-spanner/trillian-log-signer-deployment.yaml
- -f=envsubst-spanner/trillian-log-signer-service.yaml
- --prune
- --all
- --prune-whitelist=core/v1/ConfigMap
env:
- CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
waitFor:
- envsubst_k8s_cfgs_for_spanner
- build_log_server
- build_log_signer
- id: copy_k8s_cfgs_for_mysql
name: busybox
entrypoint: cp
args:
- -r
- examples/deployment/kubernetes/
- envsubst-mysql/
waitFor: ['-']
- id: envsubst_k8s_cfgs_for_mysql
name: envsubst
args:
- envsubst-mysql/etcd-cluster.yaml
- envsubst-mysql/trillian-ci-mysql.yaml
- envsubst-mysql/trillian-mysql.yaml
- envsubst-mysql/trillian-log-deployment.yaml
- envsubst-mysql/trillian-log-service.yaml
- envsubst-mysql/trillian-log-signer-deployment.yaml
- envsubst-mysql/trillian-log-signer-service.yaml
env:
- PROJECT_ID=${PROJECT_ID}
- IMAGE_TAG=${COMMIT_SHA}
- MYSQL_ROOT_PASSWORD=${_MYSQL_ROOT_PASSWORD}
- MYSQL_PASSWORD=${_MYSQL_PASSWORD}
waitFor:
- build_envsubst
- copy_k8s_cfgs_for_mysql
- id: apply_k8s_cfgs_for_mysql_dryrun
name: gcr.io/cloud-builders/kubectl
args:
- apply
- --dry-run=server
- --namespace=mysql
- -f=envsubst-mysql/etcd-cluster.yaml
- -f=envsubst-mysql/trillian-ci-mysql.yaml
- -f=envsubst-mysql/trillian-mysql.yaml
- -f=envsubst-mysql/trillian-log-deployment.yaml
- -f=envsubst-mysql/trillian-log-service.yaml
- -f=envsubst-mysql/trillian-log-signer-deployment.yaml
- -f=envsubst-mysql/trillian-log-signer-service.yaml
- --prune
- --all
- --prune-whitelist=core/v1/ConfigMap
env:
- CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE}
- CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME}
waitFor:
- envsubst_k8s_cfgs_for_mysql
- build_db_server
- build_log_server
- build_log_signer

View File

@@ -1,51 +0,0 @@
timeout: 1800s
substitutions:
_MYSQL_TAG: "5.7"
options:
machineType: E2_HIGHCPU_32
steps:
- id: pull_mysql
name : gcr.io/cloud-builders/docker
args:
- pull
- marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- id: tag_mysql
name: gcr.io/cloud-builders/docker
args:
- tag
- marketplace.gcr.io/google/mysql5:${_MYSQL_TAG}
- gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
waitFor:
- pull_mysql
- id: push_mysql
name: gcr.io/cloud-builders/docker
args:
- push
- gcr.io/${PROJECT_ID}/mysql5:${_MYSQL_TAG}
waitFor:
- tag_mysql
- id: build_db_server
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/db_server/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/db_server:${TAG_NAME}
- --cache=true
- --cache-dir= # Cache is in Google Container Registry
waitFor:
- push_mysql
- id: build_log_server
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/log_server/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/log_server:${TAG_NAME}
- --cache=true
- --cache-dir= # Cache is in Google Container Registry
waitFor: ["-"]
- id: build_log_signer
name: gcr.io/kaniko-project/executor:v1.6.0
args:
- --dockerfile=examples/deployment/docker/log_signer/Dockerfile
- --destination=gcr.io/${PROJECT_ID}/log_signer:${TAG_NAME}
- --cache=true
- --cache-dir= # Cache is in Google Container Registry
waitFor: ["-"]

View File

@@ -1,22 +0,0 @@
# Customizations to codecov for Trillian repo. This will be merged into
# the team / default codecov yaml file.
#
# Validate changes with:
# curl --data-binary @codecov.yml https://codecov.io/validate
# Exclude code that's for testing, demos or utilities that aren't really
# part of production releases.
ignore:
- "**/mock_*.go"
- "**/testonly"
- "docs"
- "examples"
- "integration"
- "testonly"
coverage:
status:
project:
default:
# Allow 1% coverage drop without complaining, to avoid being too noisy.
threshold: 1%

View File

@@ -1,22 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package trillian contains the generated protobuf code for the Trillian API.
package trillian
//go:generate protoc -I=. -I=third_party/googleapis --go_out=paths=source_relative:. --go-grpc_out=paths=source_relative:. --go-grpc_opt=require_unimplemented_servers=false trillian_log_api.proto trillian_admin_api.proto trillian.proto --doc_out=markdown,api.md:./docs/
//go:generate protoc -I=. --go_out=paths=source_relative:. crypto/keyspb/keyspb.proto
//go:generate mockgen -package tmock -destination testonly/tmock/mock_log_server.go github.com/google/trillian TrillianLogServer
//go:generate mockgen -package tmock -destination testonly/tmock/mock_admin_server.go github.com/google/trillian TrillianAdminServer

View File

@@ -1,808 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
// protoc v3.20.1
// source: trillian.proto
package trillian
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
durationpb "google.golang.org/protobuf/types/known/durationpb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// LogRootFormat specifies the fields that are covered by the
// SignedLogRoot signature, as well as their ordering and formats.
type LogRootFormat int32
const (
LogRootFormat_LOG_ROOT_FORMAT_UNKNOWN LogRootFormat = 0
LogRootFormat_LOG_ROOT_FORMAT_V1 LogRootFormat = 1
)
// Enum value maps for LogRootFormat.
var (
LogRootFormat_name = map[int32]string{
0: "LOG_ROOT_FORMAT_UNKNOWN",
1: "LOG_ROOT_FORMAT_V1",
}
LogRootFormat_value = map[string]int32{
"LOG_ROOT_FORMAT_UNKNOWN": 0,
"LOG_ROOT_FORMAT_V1": 1,
}
)
func (x LogRootFormat) Enum() *LogRootFormat {
p := new(LogRootFormat)
*p = x
return p
}
func (x LogRootFormat) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (LogRootFormat) Descriptor() protoreflect.EnumDescriptor {
return file_trillian_proto_enumTypes[0].Descriptor()
}
func (LogRootFormat) Type() protoreflect.EnumType {
return &file_trillian_proto_enumTypes[0]
}
func (x LogRootFormat) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use LogRootFormat.Descriptor instead.
func (LogRootFormat) EnumDescriptor() ([]byte, []int) {
return file_trillian_proto_rawDescGZIP(), []int{0}
}
// Defines the way empty / node / leaf hashes are constructed incorporating
// preimage protection, which can be application specific.
type HashStrategy int32
const (
// Hash strategy cannot be determined. Included to enable detection of
// mismatched proto versions being used. Represents an invalid value.
HashStrategy_UNKNOWN_HASH_STRATEGY HashStrategy = 0
// Certificate Transparency strategy: leaf hash prefix = 0x00, node prefix =
// 0x01, empty hash is digest([]byte{}), as defined in the specification.
HashStrategy_RFC6962_SHA256 HashStrategy = 1
// Sparse Merkle Tree strategy: leaf hash prefix = 0x00, node prefix = 0x01,
// empty branch is recursively computed from empty leaf nodes.
// NOT secure in a multi tree environment. For testing only.
HashStrategy_TEST_MAP_HASHER HashStrategy = 2
// Append-only log strategy where leaf nodes are defined as the ObjectHash.
// All other properties are equal to RFC6962_SHA256.
HashStrategy_OBJECT_RFC6962_SHA256 HashStrategy = 3
// The CONIKS sparse tree hasher with SHA512_256 as the hash algorithm.
HashStrategy_CONIKS_SHA512_256 HashStrategy = 4
// The CONIKS sparse tree hasher with SHA256 as the hash algorithm.
HashStrategy_CONIKS_SHA256 HashStrategy = 5
)
// Enum value maps for HashStrategy.
var (
HashStrategy_name = map[int32]string{
0: "UNKNOWN_HASH_STRATEGY",
1: "RFC6962_SHA256",
2: "TEST_MAP_HASHER",
3: "OBJECT_RFC6962_SHA256",
4: "CONIKS_SHA512_256",
5: "CONIKS_SHA256",
}
HashStrategy_value = map[string]int32{
"UNKNOWN_HASH_STRATEGY": 0,
"RFC6962_SHA256": 1,
"TEST_MAP_HASHER": 2,
"OBJECT_RFC6962_SHA256": 3,
"CONIKS_SHA512_256": 4,
"CONIKS_SHA256": 5,
}
)
func (x HashStrategy) Enum() *HashStrategy {
p := new(HashStrategy)
*p = x
return p
}
func (x HashStrategy) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (HashStrategy) Descriptor() protoreflect.EnumDescriptor {
return file_trillian_proto_enumTypes[1].Descriptor()
}
func (HashStrategy) Type() protoreflect.EnumType {
return &file_trillian_proto_enumTypes[1]
}
func (x HashStrategy) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use HashStrategy.Descriptor instead.
func (HashStrategy) EnumDescriptor() ([]byte, []int) {
return file_trillian_proto_rawDescGZIP(), []int{1}
}
// State of the tree.
type TreeState int32
const (
// Tree state cannot be determined. Included to enable detection of
// mismatched proto versions being used. Represents an invalid value.
TreeState_UNKNOWN_TREE_STATE TreeState = 0
// Active trees are able to respond to both read and write requests.
TreeState_ACTIVE TreeState = 1
// Frozen trees are only able to respond to read requests, writing to a frozen
// tree is forbidden. Trees should not be frozen when there are entries
// in the queue that have not yet been integrated. See the DRAINING
// state for this case.
TreeState_FROZEN TreeState = 2
// Deprecated: now tracked in Tree.deleted.
//
// Deprecated: Marked as deprecated in trillian.proto.
TreeState_DEPRECATED_SOFT_DELETED TreeState = 3
// Deprecated: now tracked in Tree.deleted.
//
// Deprecated: Marked as deprecated in trillian.proto.
TreeState_DEPRECATED_HARD_DELETED TreeState = 4
// A tree that is draining will continue to integrate queued entries.
// No new entries should be accepted.
TreeState_DRAINING TreeState = 5
)
// Enum value maps for TreeState.
var (
TreeState_name = map[int32]string{
0: "UNKNOWN_TREE_STATE",
1: "ACTIVE",
2: "FROZEN",
3: "DEPRECATED_SOFT_DELETED",
4: "DEPRECATED_HARD_DELETED",
5: "DRAINING",
}
TreeState_value = map[string]int32{
"UNKNOWN_TREE_STATE": 0,
"ACTIVE": 1,
"FROZEN": 2,
"DEPRECATED_SOFT_DELETED": 3,
"DEPRECATED_HARD_DELETED": 4,
"DRAINING": 5,
}
)
func (x TreeState) Enum() *TreeState {
p := new(TreeState)
*p = x
return p
}
func (x TreeState) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (TreeState) Descriptor() protoreflect.EnumDescriptor {
return file_trillian_proto_enumTypes[2].Descriptor()
}
func (TreeState) Type() protoreflect.EnumType {
return &file_trillian_proto_enumTypes[2]
}
func (x TreeState) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TreeState.Descriptor instead.
func (TreeState) EnumDescriptor() ([]byte, []int) {
return file_trillian_proto_rawDescGZIP(), []int{2}
}
// Type of the tree.
type TreeType int32
const (
// Tree type cannot be determined. Included to enable detection of mismatched
// proto versions being used. Represents an invalid value.
TreeType_UNKNOWN_TREE_TYPE TreeType = 0
// Tree represents a verifiable log.
TreeType_LOG TreeType = 1
// Tree represents a verifiable pre-ordered log, i.e., a log whose entries are
// placed according to sequence numbers assigned outside of Trillian.
TreeType_PREORDERED_LOG TreeType = 3
)
// Enum value maps for TreeType.
var (
TreeType_name = map[int32]string{
0: "UNKNOWN_TREE_TYPE",
1: "LOG",
3: "PREORDERED_LOG",
}
TreeType_value = map[string]int32{
"UNKNOWN_TREE_TYPE": 0,
"LOG": 1,
"PREORDERED_LOG": 3,
}
)
func (x TreeType) Enum() *TreeType {
p := new(TreeType)
*p = x
return p
}
func (x TreeType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (TreeType) Descriptor() protoreflect.EnumDescriptor {
return file_trillian_proto_enumTypes[3].Descriptor()
}
func (TreeType) Type() protoreflect.EnumType {
return &file_trillian_proto_enumTypes[3]
}
func (x TreeType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use TreeType.Descriptor instead.
func (TreeType) EnumDescriptor() ([]byte, []int) {
return file_trillian_proto_rawDescGZIP(), []int{3}
}
// Represents a tree.
// Readonly attributes are assigned at tree creation, after which they may not
// be modified.
//
// Note: Many APIs within the rest of the code require these objects to
// be provided. For safety they should be obtained via Admin API calls and
// not created dynamically.
type Tree struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// ID of the tree.
// Readonly.
TreeId int64 `protobuf:"varint,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
// State of the tree.
// Trees are ACTIVE after creation. At any point the tree may transition
// between ACTIVE, DRAINING and FROZEN states.
TreeState TreeState `protobuf:"varint,2,opt,name=tree_state,json=treeState,proto3,enum=trillian.TreeState" json:"tree_state,omitempty"`
// Type of the tree.
// Readonly after Tree creation. Exception: Can be switched from
// PREORDERED_LOG to LOG if the Tree is and remains in the FROZEN state.
TreeType TreeType `protobuf:"varint,3,opt,name=tree_type,json=treeType,proto3,enum=trillian.TreeType" json:"tree_type,omitempty"`
// Display name of the tree.
// Optional.
DisplayName string `protobuf:"bytes,8,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// Description of the tree,
// Optional.
Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"`
// Storage-specific settings.
// Varies according to the storage implementation backing Trillian.
StorageSettings *anypb.Any `protobuf:"bytes,13,opt,name=storage_settings,json=storageSettings,proto3" json:"storage_settings,omitempty"`
// Interval after which a new signed root is produced even if there have been
// no submission. If zero, this behavior is disabled.
MaxRootDuration *durationpb.Duration `protobuf:"bytes,15,opt,name=max_root_duration,json=maxRootDuration,proto3" json:"max_root_duration,omitempty"`
// Time of tree creation.
// Readonly.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// Time of last tree update.
// Readonly (automatically assigned on updates).
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// If true, the tree has been deleted.
// Deleted trees may be undeleted during a certain time window, after which
// they're permanently deleted (and unrecoverable).
// Readonly.
Deleted bool `protobuf:"varint,19,opt,name=deleted,proto3" json:"deleted,omitempty"`
// Time of tree deletion, if any.
// Readonly.
DeleteTime *timestamppb.Timestamp `protobuf:"bytes,20,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
}
func (x *Tree) Reset() {
*x = Tree{}
if protoimpl.UnsafeEnabled {
mi := &file_trillian_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Tree) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Tree) ProtoMessage() {}
func (x *Tree) ProtoReflect() protoreflect.Message {
mi := &file_trillian_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Tree.ProtoReflect.Descriptor instead.
func (*Tree) Descriptor() ([]byte, []int) {
return file_trillian_proto_rawDescGZIP(), []int{0}
}
func (x *Tree) GetTreeId() int64 {
if x != nil {
return x.TreeId
}
return 0
}
func (x *Tree) GetTreeState() TreeState {
if x != nil {
return x.TreeState
}
return TreeState_UNKNOWN_TREE_STATE
}
func (x *Tree) GetTreeType() TreeType {
if x != nil {
return x.TreeType
}
return TreeType_UNKNOWN_TREE_TYPE
}
func (x *Tree) GetDisplayName() string {
if x != nil {
return x.DisplayName
}
return ""
}
func (x *Tree) GetDescription() string {
if x != nil {
return x.Description
}
return ""
}
func (x *Tree) GetStorageSettings() *anypb.Any {
if x != nil {
return x.StorageSettings
}
return nil
}
func (x *Tree) GetMaxRootDuration() *durationpb.Duration {
if x != nil {
return x.MaxRootDuration
}
return nil
}
func (x *Tree) GetCreateTime() *timestamppb.Timestamp {
if x != nil {
return x.CreateTime
}
return nil
}
func (x *Tree) GetUpdateTime() *timestamppb.Timestamp {
if x != nil {
return x.UpdateTime
}
return nil
}
func (x *Tree) GetDeleted() bool {
if x != nil {
return x.Deleted
}
return false
}
func (x *Tree) GetDeleteTime() *timestamppb.Timestamp {
if x != nil {
return x.DeleteTime
}
return nil
}
// SignedLogRoot represents a commitment by a Log to a particular tree.
//
// Note that the signature itself is no-longer provided by Trillian since
// https://github.com/google/trillian/pull/2452 .
// This functionality was intended to support a niche-use case but added
// significant complexity and was prone to causing confusion and
// misunderstanding for personality authors.
type SignedLogRoot struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// log_root holds the TLS-serialization of the following structure (described
// in RFC5246 notation):
//
// enum { v1(1), (65535)} Version;
//
// struct {
// uint64 tree_size;
// opaque root_hash<0..128>;
// uint64 timestamp_nanos;
// uint64 revision;
// opaque metadata<0..65535>;
// } LogRootV1;
//
// struct {
// Version version;
// select(version) {
// case v1: LogRootV1;
// }
// } LogRoot;
//
// A serialized v1 log root will therefore be laid out as:
//
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
// | ver=1 | tree_size |len| root_hash |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
//
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
// | timestamp_nanos | revision |
// +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
//
// +---+---+---+---+---+-....---+
// | len | metadata |
// +---+---+---+---+---+-....---+
//
// (with all integers encoded big-endian).
LogRoot []byte `protobuf:"bytes,8,opt,name=log_root,json=logRoot,proto3" json:"log_root,omitempty"`
}
// Reset restores x to its zero value.
func (x *SignedLogRoot) Reset() {
	*x = SignedLogRoot{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the message info that the wipe above discarded.
		mi := &file_trillian_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime.
func (x *SignedLogRoot) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying *SignedLogRoot as a proto message.
func (*SignedLogRoot) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// message info when the unsafe fast path is enabled.
func (x *SignedLogRoot) ProtoReflect() protoreflect.Message {
	mi := &file_trillian_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's index path.
//
// Deprecated: Use SignedLogRoot.ProtoReflect.Descriptor instead.
func (*SignedLogRoot) Descriptor() ([]byte, []int) {
	return file_trillian_proto_rawDescGZIP(), []int{1}
}
// GetLogRoot returns the serialized log root bytes.
// It is safe to call on a nil receiver, which yields nil.
func (x *SignedLogRoot) GetLogRoot() []byte {
	if x == nil {
		return nil
	}
	return x.LogRoot
}
// Proof holds a consistency or inclusion proof for a Merkle tree, as returned
// by the API.
type Proof struct {
	state         protoimpl.MessageState // protoimpl runtime bookkeeping
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// leaf_index indicates the requested leaf index when this message is used for
	// a leaf inclusion proof. This field is set to zero when this message is
	// used for a consistency proof.
	LeafIndex int64 `protobuf:"varint,1,opt,name=leaf_index,json=leafIndex,proto3" json:"leaf_index,omitempty"`
	// hashes holds the ordered node hashes that make up the proof.
	Hashes [][]byte `protobuf:"bytes,3,rep,name=hashes,proto3" json:"hashes,omitempty"`
}
// Reset restores x to its zero value.
func (x *Proof) Reset() {
	*x = Proof{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the message info that the wipe above discarded.
		mi := &file_trillian_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime.
func (x *Proof) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying *Proof as a proto message.
func (*Proof) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// message info when the unsafe fast path is enabled.
func (x *Proof) ProtoReflect() protoreflect.Message {
	mi := &file_trillian_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's index path.
//
// Deprecated: Use Proof.ProtoReflect.Descriptor instead.
func (*Proof) Descriptor() ([]byte, []int) {
	return file_trillian_proto_rawDescGZIP(), []int{2}
}
// GetLeafIndex returns the requested leaf index.
// It is safe to call on a nil receiver, which yields 0.
func (x *Proof) GetLeafIndex() int64 {
	if x == nil {
		return 0
	}
	return x.LeafIndex
}
// GetHashes returns the proof hashes.
// It is safe to call on a nil receiver, which yields nil.
func (x *Proof) GetHashes() [][]byte {
	if x == nil {
		return nil
	}
	return x.Hashes
}
// File_trillian_proto is the protoreflect descriptor for trillian.proto,
// populated by file_trillian_proto_init.
var File_trillian_proto protoreflect.FileDescriptor

// file_trillian_proto_rawDesc is the serialized FileDescriptorProto for
// trillian.proto, embedded verbatim by protoc-gen-go. Generated data — do
// not edit by hand.
var file_trillian_proto_rawDesc = []byte{
	0x0a, 0x0e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x12, 0x08, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
	0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf1, 0x05, 0x0a, 0x04, 0x54, 0x72, 0x65, 0x65, 0x12,
	0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
	0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x0a, 0x74, 0x72, 0x65, 0x65,
	0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x74,
	0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74,
	0x65, 0x52, 0x09, 0x74, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x09,
	0x74, 0x72, 0x65, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32,
	0x12, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x54,
	0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x72, 0x65, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a,
	0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20,
	0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65,
	0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
	0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
	0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65,
	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
	0x6e, 0x79, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
	0x6e, 0x67, 0x73, 0x12, 0x45, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f,
	0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
	0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x52, 0x6f,
	0x6f, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72,
	0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32,
	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
	0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65,
	0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74,
	0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
	0x54, 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18,
	0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x3b,
	0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20,
	0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
	0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10,
	0x08, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08,
	0x12, 0x10, 0x13, 0x52, 0x1e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
	0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x65, 0x70,
	0x6f, 0x63, 0x68, 0x52, 0x10, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70,
	0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x61, 0x6c, 0x67, 0x6f,
	0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x73, 0x74, 0x72, 0x61,
	0x74, 0x65, 0x67, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65,
	0x79, 0x52, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x52, 0x13, 0x73,
	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74,
	0x68, 0x6d, 0x52, 0x16, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x69,
	0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x1e, 0x75, 0x70, 0x64, 0x61,
	0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x5f, 0x73,
	0x69, 0x6e, 0x63, 0x65, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x22, 0x9d, 0x01, 0x0a, 0x0d, 0x53,
	0x69, 0x67, 0x6e, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x19, 0x0a, 0x08,
	0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07,
	0x6c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x08, 0x4a, 0x04, 0x08,
	0x09, 0x10, 0x0a, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x6c,
	0x6f, 0x67, 0x5f, 0x69, 0x64, 0x52, 0x12, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f,
	0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x5f,
	0x68, 0x61, 0x73, 0x68, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
	0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
	0x52, 0x0d, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52,
	0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x50, 0x0a, 0x05, 0x50, 0x72,
	0x6f, 0x6f, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65,
	0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x65, 0x61, 0x66, 0x49, 0x6e, 0x64,
	0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03,
	0x28, 0x0c, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03,
	0x52, 0x0a, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2a, 0x44, 0x0a, 0x0d,
	0x4c, 0x6f, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a,
	0x17, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x4f, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54,
	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x4f,
	0x47, 0x5f, 0x52, 0x4f, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x56, 0x31,
	0x10, 0x01, 0x2a, 0x97, 0x01, 0x0a, 0x0c, 0x48, 0x61, 0x73, 0x68, 0x53, 0x74, 0x72, 0x61, 0x74,
	0x65, 0x67, 0x79, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x48,
	0x41, 0x53, 0x48, 0x5f, 0x53, 0x54, 0x52, 0x41, 0x54, 0x45, 0x47, 0x59, 0x10, 0x00, 0x12, 0x12,
	0x0a, 0x0e, 0x52, 0x46, 0x43, 0x36, 0x39, 0x36, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36,
	0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x48,
	0x41, 0x53, 0x48, 0x45, 0x52, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x42, 0x4a, 0x45, 0x43,
	0x54, 0x5f, 0x52, 0x46, 0x43, 0x36, 0x39, 0x36, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36,
	0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x49, 0x4b, 0x53, 0x5f, 0x53, 0x48, 0x41,
	0x35, 0x31, 0x32, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4e,
	0x49, 0x4b, 0x53, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05, 0x2a, 0x8b, 0x01, 0x0a,
	0x09, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x55, 0x4e,
	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x52, 0x45, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45,
	0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x0a,
	0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x17, 0x44, 0x45,
	0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x4f, 0x46, 0x54, 0x5f, 0x44, 0x45,
	0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x03, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x1f, 0x0a, 0x17, 0x44,
	0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x48, 0x41, 0x52, 0x44, 0x5f, 0x44,
	0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x04, 0x1a, 0x02, 0x08, 0x01, 0x12, 0x0c, 0x0a, 0x08,
	0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x2a, 0x49, 0x0a, 0x08, 0x54, 0x72,
	0x65, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
	0x4e, 0x5f, 0x54, 0x52, 0x45, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a,
	0x03, 0x4c, 0x4f, 0x47, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x52, 0x45, 0x4f, 0x52, 0x44,
	0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x4f, 0x47, 0x10, 0x03, 0x22, 0x04, 0x08, 0x02, 0x10, 0x02,
	0x2a, 0x03, 0x4d, 0x41, 0x50, 0x42, 0x48, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f,
	0x67, 0x6c, 0x65, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x42, 0x0d, 0x54, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x74,
	0x6f, 0x50, 0x01, 0x5a, 0x1a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x62,
	0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	// file_trillian_proto_rawDescOnce guards the one-time gzip compression
	// performed by file_trillian_proto_rawDescGZIP.
	file_trillian_proto_rawDescOnce sync.Once
	file_trillian_proto_rawDescData = file_trillian_proto_rawDesc
)
// file_trillian_proto_rawDescGZIP returns the raw descriptor in gzip form,
// compressing it exactly once on first use (rawDescData is replaced in place).
func file_trillian_proto_rawDescGZIP() []byte {
	file_trillian_proto_rawDescOnce.Do(func() {
		file_trillian_proto_rawDescData = protoimpl.X.CompressGZIP(file_trillian_proto_rawDescData)
	})
	return file_trillian_proto_rawDescData
}
// Per-type runtime metadata slots, filled in by file_trillian_proto_init.
var file_trillian_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
var file_trillian_proto_msgTypes = make([]protoimpl.MessageInfo, 3)

// file_trillian_proto_goTypes maps the file's descriptor type indices to Go types.
var file_trillian_proto_goTypes = []interface{}{
	(LogRootFormat)(0),           // 0: trillian.LogRootFormat
	(HashStrategy)(0),            // 1: trillian.HashStrategy
	(TreeState)(0),               // 2: trillian.TreeState
	(TreeType)(0),                // 3: trillian.TreeType
	(*Tree)(nil),                 // 4: trillian.Tree
	(*SignedLogRoot)(nil),        // 5: trillian.SignedLogRoot
	(*Proof)(nil),                // 6: trillian.Proof
	(*anypb.Any)(nil),            // 7: google.protobuf.Any
	(*durationpb.Duration)(nil),  // 8: google.protobuf.Duration
	(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
}

// file_trillian_proto_depIdxs records, for each typed field, the index in
// goTypes of that field's type, followed by sub-list offsets.
var file_trillian_proto_depIdxs = []int32{
	2, // 0: trillian.Tree.tree_state:type_name -> trillian.TreeState
	3, // 1: trillian.Tree.tree_type:type_name -> trillian.TreeType
	7, // 2: trillian.Tree.storage_settings:type_name -> google.protobuf.Any
	8, // 3: trillian.Tree.max_root_duration:type_name -> google.protobuf.Duration
	9, // 4: trillian.Tree.create_time:type_name -> google.protobuf.Timestamp
	9, // 5: trillian.Tree.update_time:type_name -> google.protobuf.Timestamp
	9, // 6: trillian.Tree.delete_time:type_name -> google.protobuf.Timestamp
	7, // [7:7] is the sub-list for method output_type
	7, // [7:7] is the sub-list for method input_type
	7, // [7:7] is the sub-list for extension type_name
	7, // [7:7] is the sub-list for extension extendee
	0, // [0:7] is the sub-list for field type_name
}
func init() { file_trillian_proto_init() }

// file_trillian_proto_init registers this file's enums and messages with the
// protoimpl runtime and populates File_trillian_proto. It is idempotent: a
// second call returns immediately.
func file_trillian_proto_init() {
	if File_trillian_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		// Without the unsafe fast path the runtime cannot reach the
		// unexported bookkeeping fields directly, so exporter functions
		// are provided for each message type.
		file_trillian_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Tree); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_trillian_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SignedLogRoot); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_trillian_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Proof); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_trillian_proto_rawDesc,
			NumEnums:      4,
			NumMessages:   3,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_trillian_proto_goTypes,
		DependencyIndexes: file_trillian_proto_depIdxs,
		EnumInfos:         file_trillian_proto_enumTypes,
		MessageInfos:      file_trillian_proto_msgTypes,
	}.Build()
	File_trillian_proto = out.File
	// Release init-time data so it can be garbage collected.
	file_trillian_proto_rawDesc = nil
	file_trillian_proto_goTypes = nil
	file_trillian_proto_depIdxs = nil
}

View File

@@ -1,241 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.google.trillian.proto";
option java_outer_classname = "TrillianProto";
option go_package = "github.com/google/trillian";
package trillian;
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
// LogRootFormat specifies the fields that are covered by the
// SignedLogRoot signature, as well as their ordering and formats.
enum LogRootFormat {
  // Format cannot be determined. Represents an invalid value.
  LOG_ROOT_FORMAT_UNKNOWN = 0;
  // The log root is the TLS-serialized LogRootV1 structure described in
  // SignedLogRoot.log_root.
  LOG_ROOT_FORMAT_V1 = 1;
}
// This file defines the types that are exposed through the public Trillian
// APIs.
// Defines the way empty / node / leaf hashes are constructed incorporating
// preimage protection, which can be application specific.
enum HashStrategy {
  // Hash strategy cannot be determined. Included to enable detection of
  // mismatched proto versions being used. Represents an invalid value.
  UNKNOWN_HASH_STRATEGY = 0;
  // Certificate Transparency strategy: leaf hash prefix = 0x00, node prefix =
  // 0x01, empty hash is digest([]byte{}), as defined in the RFC 6962
  // specification.
  RFC6962_SHA256 = 1;
  // Sparse Merkle Tree strategy: leaf hash prefix = 0x00, node prefix = 0x01,
  // empty branch is recursively computed from empty leaf nodes.
  // NOT secure in a multi tree environment. For testing only.
  TEST_MAP_HASHER = 2;
  // Append-only log strategy where leaf nodes are defined as the ObjectHash.
  // All other properties are equal to RFC6962_SHA256.
  OBJECT_RFC6962_SHA256 = 3;
  // The CONIKS sparse tree hasher with SHA512_256 as the hash algorithm.
  CONIKS_SHA512_256 = 4;
  // The CONIKS sparse tree hasher with SHA256 as the hash algorithm.
  CONIKS_SHA256 = 5;
}
// State of the tree. See the Tree.tree_state field for the allowed
// transitions between states.
enum TreeState {
  // Tree state cannot be determined. Included to enable detection of
  // mismatched proto versions being used. Represents an invalid value.
  UNKNOWN_TREE_STATE = 0;
  // Active trees are able to respond to both read and write requests.
  ACTIVE = 1;
  // Frozen trees are only able to respond to read requests, writing to a frozen
  // tree is forbidden. Trees should not be frozen when there are entries
  // in the queue that have not yet been integrated. See the DRAINING
  // state for this case.
  FROZEN = 2;
  // Deprecated: now tracked in Tree.deleted.
  DEPRECATED_SOFT_DELETED = 3 [deprecated = true];
  // Deprecated: now tracked in Tree.deleted.
  DEPRECATED_HARD_DELETED = 4 [deprecated = true];
  // A tree that is draining will continue to integrate queued entries.
  // No new entries should be accepted.
  DRAINING = 5;
}
// Type of the tree.
enum TreeType {
  // Tree type cannot be determined. Included to enable detection of mismatched
  // proto versions being used. Represents an invalid value.
  UNKNOWN_TREE_TYPE = 0;
  // Tree represents a verifiable log.
  LOG = 1;
  // Tree represents a verifiable pre-ordered log, i.e., a log whose entries are
  // placed according to sequence numbers assigned outside of Trillian.
  PREORDERED_LOG = 3;
  // Value 2 (previously named "MAP") is reserved so that it cannot be reused.
  reserved 2;
  reserved "MAP";
}
// Represents a tree.
// Readonly attributes are assigned at tree creation, after which they may not
// be modified.
//
// Note: Many APIs within the rest of the code require these objects to
// be provided. For safety they should be obtained via Admin API calls and
// not created dynamically.
message Tree {
  // ID of the tree.
  // Readonly.
  int64 tree_id = 1;
  // State of the tree.
  // Trees are ACTIVE after creation. At any point the tree may transition
  // between ACTIVE, DRAINING and FROZEN states.
  TreeState tree_state = 2;
  // Type of the tree.
  // Readonly after Tree creation. Exception: Can be switched from
  // PREORDERED_LOG to LOG if the Tree is and remains in the FROZEN state.
  TreeType tree_type = 3;
  // Display name of the tree.
  // Optional.
  string display_name = 8;
  // Description of the tree.
  // Optional.
  string description = 9;
  // Storage-specific settings.
  // Varies according to the storage implementation backing Trillian.
  google.protobuf.Any storage_settings = 13;
  // Interval after which a new signed root is produced even if there have been
  // no submissions. If zero, this behavior is disabled.
  google.protobuf.Duration max_root_duration = 15;
  // Time of tree creation.
  // Readonly.
  google.protobuf.Timestamp create_time = 16;
  // Time of last tree update.
  // Readonly (automatically assigned on updates).
  google.protobuf.Timestamp update_time = 17;
  // If true, the tree has been deleted.
  // Deleted trees may be undeleted during a certain time window, after which
  // they're permanently deleted (and unrecoverable).
  // Readonly.
  bool deleted = 19;
  // Time of tree deletion, if any.
  // Readonly.
  google.protobuf.Timestamp delete_time = 20;
  // Retired field numbers and names; reserved so they cannot be reused.
  reserved 4 to 7, 10 to 12, 14, 18;
  reserved "create_time_millis_since_epoch";
  reserved "duplicate_policy";
  reserved "hash_algorithm";
  reserved "hash_strategy";
  reserved "private_key";
  reserved "public_key";
  reserved "signature_algorithm";
  reserved "signature_cipher_suite";
  reserved "update_time_millis_since_epoch";
}
// SignedLogRoot represents a commitment by a Log to a particular tree.
//
// Note that the signature itself is no-longer provided by Trillian since
// https://github.com/google/trillian/pull/2452 .
// This functionality was intended to support a niche-use case but added
// significant complexity and was prone to causing confusion and
// misunderstanding for personality authors.
message SignedLogRoot {
  // log_root holds the TLS-serialization of the following structure (described
  // in RFC5246 notation):
  //
  // enum { v1(1), (65535)} Version;
  // struct {
  //   uint64 tree_size;
  //   opaque root_hash<0..128>;
  //   uint64 timestamp_nanos;
  //   uint64 revision;
  //   opaque metadata<0..65535>;
  // } LogRootV1;
  // struct {
  //   Version version;
  //   select(version) {
  //     case v1: LogRootV1;
  //   }
  // } LogRoot;
  //
  // A serialized v1 log root will therefore be laid out as:
  //
  // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
  // | ver=1 |          tree_size            |len|    root_hash      |
  // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+-....--+
  //
  // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
  // |       timestamp_nanos         |      revision                 |
  // +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
  //
  // +---+---+---+---+---+-....---+
  // |  len  |    metadata        |
  // +---+---+---+---+---+-....---+
  //
  // (with all integers encoded big-endian).
  bytes log_root = 8;
  // Fields removed when the separate signature was dropped (see message
  // comment above); reserved so they cannot be reused.
  reserved 1 to 7, 9;
  reserved "key_hint";
  reserved "log_id";
  reserved "log_root_signature";
  reserved "root_hash";
  reserved "signature";
  reserved "timestamp_nanos";
  reserved "tree_revision";
  reserved "tree_size";
}
// Proof holds a consistency or inclusion proof for a Merkle tree, as returned
// by the API.
message Proof {
  // leaf_index indicates the requested leaf index when this message is used for
  // a leaf inclusion proof. This field is set to zero when this message is
  // used for a consistency proof.
  int64 leaf_index = 1;
  // The ordered node hashes that make up the proof.
  repeated bytes hashes = 3;
  // Retired field; reserved so it cannot be reused.
  reserved 2;
  reserved "proof_node";
}

View File

@@ -1,621 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
// protoc v3.20.1
// source: trillian_admin_api.proto
package trillian
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
reflect "reflect"
sync "sync"
)
// Build-time guards against version skew between this generated code and the
// protoimpl runtime.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// ListTrees request.
// No filters or pagination options are provided.
type ListTreesRequest struct {
	state         protoimpl.MessageState // protoimpl runtime bookkeeping
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// If true, deleted trees are included in the response.
	ShowDeleted bool `protobuf:"varint,1,opt,name=show_deleted,json=showDeleted,proto3" json:"show_deleted,omitempty"`
}
// Reset restores x to its zero value.
func (x *ListTreesRequest) Reset() {
	*x = ListTreesRequest{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the message info that the wipe above discarded.
		mi := &file_trillian_admin_api_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime.
func (x *ListTreesRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying *ListTreesRequest as a proto message.
func (*ListTreesRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// message info when the unsafe fast path is enabled.
func (x *ListTreesRequest) ProtoReflect() protoreflect.Message {
	mi := &file_trillian_admin_api_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's index path.
//
// Deprecated: Use ListTreesRequest.ProtoReflect.Descriptor instead.
func (*ListTreesRequest) Descriptor() ([]byte, []int) {
	return file_trillian_admin_api_proto_rawDescGZIP(), []int{0}
}
// GetShowDeleted reports whether deleted trees should be included.
// It is safe to call on a nil receiver, which yields false.
func (x *ListTreesRequest) GetShowDeleted() bool {
	if x == nil {
		return false
	}
	return x.ShowDeleted
}
// ListTrees response.
// No pagination is provided, all trees the requester has access to are
// returned.
type ListTreesResponse struct {
	state         protoimpl.MessageState // protoimpl runtime bookkeeping
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Trees matching the list request filters.
	Tree []*Tree `protobuf:"bytes,1,rep,name=tree,proto3" json:"tree,omitempty"`
}
// Reset restores x to its zero value.
func (x *ListTreesResponse) Reset() {
	*x = ListTreesResponse{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the message info that the wipe above discarded.
		mi := &file_trillian_admin_api_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime.
func (x *ListTreesResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying *ListTreesResponse as a proto message.
func (*ListTreesResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// message info when the unsafe fast path is enabled.
func (x *ListTreesResponse) ProtoReflect() protoreflect.Message {
	mi := &file_trillian_admin_api_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's index path.
//
// Deprecated: Use ListTreesResponse.ProtoReflect.Descriptor instead.
func (*ListTreesResponse) Descriptor() ([]byte, []int) {
	return file_trillian_admin_api_proto_rawDescGZIP(), []int{1}
}
// GetTree returns the trees matching the list request.
// It is safe to call on a nil receiver, which yields nil.
func (x *ListTreesResponse) GetTree() []*Tree {
	if x == nil {
		return nil
	}
	return x.Tree
}
// GetTree request.
type GetTreeRequest struct {
	state         protoimpl.MessageState // protoimpl runtime bookkeeping
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// ID of the tree to retrieve.
	TreeId int64 `protobuf:"varint,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
}
// Reset restores x to its zero value.
func (x *GetTreeRequest) Reset() {
	*x = GetTreeRequest{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the message info that the wipe above discarded.
		mi := &file_trillian_admin_api_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime.
func (x *GetTreeRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying *GetTreeRequest as a proto message.
func (*GetTreeRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// message info when the unsafe fast path is enabled.
func (x *GetTreeRequest) ProtoReflect() protoreflect.Message {
	mi := &file_trillian_admin_api_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's index path.
//
// Deprecated: Use GetTreeRequest.ProtoReflect.Descriptor instead.
func (*GetTreeRequest) Descriptor() ([]byte, []int) {
	return file_trillian_admin_api_proto_rawDescGZIP(), []int{2}
}
// GetTreeId returns the ID of the tree to retrieve.
// It is safe to call on a nil receiver, which yields 0.
func (x *GetTreeRequest) GetTreeId() int64 {
	if x == nil {
		return 0
	}
	return x.TreeId
}
// CreateTree request.
type CreateTreeRequest struct {
	state         protoimpl.MessageState // protoimpl runtime bookkeeping
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Tree to be created. See Tree and CreateTree for more details.
	Tree *Tree `protobuf:"bytes,1,opt,name=tree,proto3" json:"tree,omitempty"`
}
// Reset restores x to its zero value.
func (x *CreateTreeRequest) Reset() {
	*x = CreateTreeRequest{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the message info that the wipe above discarded.
		mi := &file_trillian_admin_api_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime.
func (x *CreateTreeRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying *CreateTreeRequest as a proto message.
func (*CreateTreeRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// message info when the unsafe fast path is enabled.
func (x *CreateTreeRequest) ProtoReflect() protoreflect.Message {
	mi := &file_trillian_admin_api_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's index path.
//
// Deprecated: Use CreateTreeRequest.ProtoReflect.Descriptor instead.
func (*CreateTreeRequest) Descriptor() ([]byte, []int) {
	return file_trillian_admin_api_proto_rawDescGZIP(), []int{3}
}
// GetTree returns the tree to be created.
// It is safe to call on a nil receiver, which yields nil.
func (x *CreateTreeRequest) GetTree() *Tree {
	if x == nil {
		return nil
	}
	return x.Tree
}
// UpdateTree request.
type UpdateTreeRequest struct {
	state         protoimpl.MessageState // protoimpl runtime bookkeeping
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Tree to be updated.
	Tree *Tree `protobuf:"bytes,1,opt,name=tree,proto3" json:"tree,omitempty"`
	// Fields modified by the update request.
	// For example: "tree_state", "display_name", "description".
	UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}
// Reset restores x to its zero value.
func (x *UpdateTreeRequest) Reset() {
	*x = UpdateTreeRequest{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the message info that the wipe above discarded.
		mi := &file_trillian_admin_api_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime.
func (x *UpdateTreeRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying *UpdateTreeRequest as a proto message.
func (*UpdateTreeRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// message info when the unsafe fast path is enabled.
func (x *UpdateTreeRequest) ProtoReflect() protoreflect.Message {
	mi := &file_trillian_admin_api_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's index path.
//
// Deprecated: Use UpdateTreeRequest.ProtoReflect.Descriptor instead.
func (*UpdateTreeRequest) Descriptor() ([]byte, []int) {
	return file_trillian_admin_api_proto_rawDescGZIP(), []int{4}
}
// GetTree returns the tree to be updated.
// It is safe to call on a nil receiver, which yields nil.
func (x *UpdateTreeRequest) GetTree() *Tree {
	if x == nil {
		return nil
	}
	return x.Tree
}
// GetUpdateMask returns the field mask describing the fields to update.
// It is safe to call on a nil receiver, which yields nil.
func (x *UpdateTreeRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
	if x == nil {
		return nil
	}
	return x.UpdateMask
}
// DeleteTree request.
type DeleteTreeRequest struct {
	state         protoimpl.MessageState // protoimpl runtime bookkeeping
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// ID of the tree to delete.
	TreeId int64 `protobuf:"varint,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
}
// Reset restores x to its zero value.
func (x *DeleteTreeRequest) Reset() {
	*x = DeleteTreeRequest{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the message info that the wipe above discarded.
		mi := &file_trillian_admin_api_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime.
func (x *DeleteTreeRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage is a marker method identifying *DeleteTreeRequest as a proto message.
func (*DeleteTreeRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily caching the
// message info when the unsafe fast path is enabled.
func (x *DeleteTreeRequest) ProtoReflect() protoreflect.Message {
	mi := &file_trillian_admin_api_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's index path.
//
// Deprecated: Use DeleteTreeRequest.ProtoReflect.Descriptor instead.
func (*DeleteTreeRequest) Descriptor() ([]byte, []int) {
	return file_trillian_admin_api_proto_rawDescGZIP(), []int{5}
}
// GetTreeId returns the ID of the tree to delete.
// It is safe to call on a nil receiver, which yields 0.
func (x *DeleteTreeRequest) GetTreeId() int64 {
	if x == nil {
		return 0
	}
	return x.TreeId
}
// UndeleteTree request.
type UndeleteTreeRequest struct {
	state         protoimpl.MessageState // protoimpl runtime bookkeeping
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// ID of the tree to undelete.
	TreeId int64 `protobuf:"varint,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"`
}
func (x *UndeleteTreeRequest) Reset() {
*x = UndeleteTreeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_trillian_admin_api_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *UndeleteTreeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*UndeleteTreeRequest) ProtoMessage() {}
func (x *UndeleteTreeRequest) ProtoReflect() protoreflect.Message {
mi := &file_trillian_admin_api_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use UndeleteTreeRequest.ProtoReflect.Descriptor instead.
func (*UndeleteTreeRequest) Descriptor() ([]byte, []int) {
return file_trillian_admin_api_proto_rawDescGZIP(), []int{6}
}
func (x *UndeleteTreeRequest) GetTreeId() int64 {
if x != nil {
return x.TreeId
}
return 0
}
// File_trillian_admin_api_proto is the compiled descriptor for
// trillian_admin_api.proto, populated by file_trillian_admin_api_proto_init.
var File_trillian_admin_api_proto protoreflect.FileDescriptor

// file_trillian_admin_api_proto_rawDesc holds the wire-format
// FileDescriptorProto for trillian_admin_api.proto as emitted by
// protoc-gen-go. Do not edit: the bytes must match the .proto source.
var file_trillian_admin_api_proto_rawDesc = []byte{
	0x0a, 0x18, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e,
	0x5f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x74, 0x72, 0x69, 0x6c,
	0x6c, 0x69, 0x61, 0x6e, 0x1a, 0x0e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72,
	0x65, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x68,
	0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
	0x52, 0x0b, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x37, 0x0a,
	0x11, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72, 0x65, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
	0x73, 0x65, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
	0x32, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65,
	0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x29, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x54, 0x72, 0x65,
	0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65,
	0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49,
	0x64, 0x22, 0x47, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52,
	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e,
	0x54, 0x72, 0x65, 0x65, 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03,
	0x52, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x74, 0x0a, 0x11, 0x55, 0x70,
	0x64, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
	0x22, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
	0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x52, 0x04, 0x74,
	0x72, 0x65, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61,
	0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
	0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b,
	0x22, 0x2c, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
	0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x22, 0x2e,
	0x0a, 0x13, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
	0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
	0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x32, 0x86,
	0x03, 0x0a, 0x0d, 0x54, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x41, 0x64, 0x6d, 0x69, 0x6e,
	0x12, 0x46, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72, 0x65, 0x65, 0x73, 0x12, 0x1a, 0x2e,
	0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72, 0x65,
	0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x72, 0x69, 0x6c,
	0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x72, 0x65, 0x65, 0x73, 0x52, 0x65,
	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x35, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x54,
	0x72, 0x65, 0x65, 0x12, 0x18, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x47,
	0x65, 0x74, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e,
	0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x12,
	0x3b, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1b, 0x2e,
	0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
	0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x74, 0x72, 0x69,
	0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a,
	0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1b, 0x2e, 0x74, 0x72, 0x69,
	0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65,
	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
	0x61, 0x6e, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a, 0x44, 0x65, 0x6c,
	0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1b, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69,
	0x61, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71,
	0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e,
	0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0c, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65,
	0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1d, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61,
	0x6e, 0x2e, 0x55, 0x6e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
	0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e,
	0x2e, 0x54, 0x72, 0x65, 0x65, 0x22, 0x00, 0x42, 0x50, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x42, 0x15, 0x54, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x41, 0x64,
	0x6d, 0x69, 0x6e, 0x41, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1a, 0x67,
	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
	0x2f, 0x74, 0x72, 0x69, 0x6c, 0x6c, 0x69, 0x61, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x33,
}
var (
	file_trillian_admin_api_proto_rawDescOnce sync.Once
	file_trillian_admin_api_proto_rawDescData = file_trillian_admin_api_proto_rawDesc
)

// file_trillian_admin_api_proto_rawDescGZIP gzip-compresses the raw file
// descriptor exactly once (replacing rawDescData in place) and returns the
// compressed bytes on every call.
func file_trillian_admin_api_proto_rawDescGZIP() []byte {
	file_trillian_admin_api_proto_rawDescOnce.Do(func() {
		file_trillian_admin_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_trillian_admin_api_proto_rawDescData)
	})
	return file_trillian_admin_api_proto_rawDescData
}
// Runtime metadata for the 7 message types declared in this file.
var file_trillian_admin_api_proto_msgTypes = make([]protoimpl.MessageInfo, 7)

// file_trillian_admin_api_proto_goTypes lists every Go type referenced by the
// descriptor; the trailing comments give each type's index.
var file_trillian_admin_api_proto_goTypes = []interface{}{
	(*ListTreesRequest)(nil),      // 0: trillian.ListTreesRequest
	(*ListTreesResponse)(nil),     // 1: trillian.ListTreesResponse
	(*GetTreeRequest)(nil),        // 2: trillian.GetTreeRequest
	(*CreateTreeRequest)(nil),     // 3: trillian.CreateTreeRequest
	(*UpdateTreeRequest)(nil),     // 4: trillian.UpdateTreeRequest
	(*DeleteTreeRequest)(nil),     // 5: trillian.DeleteTreeRequest
	(*UndeleteTreeRequest)(nil),   // 6: trillian.UndeleteTreeRequest
	(*Tree)(nil),                  // 7: trillian.Tree
	(*fieldmaskpb.FieldMask)(nil), // 8: google.protobuf.FieldMask
}

// file_trillian_admin_api_proto_depIdxs maps each type reference (message
// field types, then RPC input types, then RPC output types) to an index in
// goTypes; the trailing entries mark the sub-list boundaries.
var file_trillian_admin_api_proto_depIdxs = []int32{
	7,  // 0: trillian.ListTreesResponse.tree:type_name -> trillian.Tree
	7,  // 1: trillian.CreateTreeRequest.tree:type_name -> trillian.Tree
	7,  // 2: trillian.UpdateTreeRequest.tree:type_name -> trillian.Tree
	8,  // 3: trillian.UpdateTreeRequest.update_mask:type_name -> google.protobuf.FieldMask
	0,  // 4: trillian.TrillianAdmin.ListTrees:input_type -> trillian.ListTreesRequest
	2,  // 5: trillian.TrillianAdmin.GetTree:input_type -> trillian.GetTreeRequest
	3,  // 6: trillian.TrillianAdmin.CreateTree:input_type -> trillian.CreateTreeRequest
	4,  // 7: trillian.TrillianAdmin.UpdateTree:input_type -> trillian.UpdateTreeRequest
	5,  // 8: trillian.TrillianAdmin.DeleteTree:input_type -> trillian.DeleteTreeRequest
	6,  // 9: trillian.TrillianAdmin.UndeleteTree:input_type -> trillian.UndeleteTreeRequest
	1,  // 10: trillian.TrillianAdmin.ListTrees:output_type -> trillian.ListTreesResponse
	7,  // 11: trillian.TrillianAdmin.GetTree:output_type -> trillian.Tree
	7,  // 12: trillian.TrillianAdmin.CreateTree:output_type -> trillian.Tree
	7,  // 13: trillian.TrillianAdmin.UpdateTree:output_type -> trillian.Tree
	7,  // 14: trillian.TrillianAdmin.DeleteTree:output_type -> trillian.Tree
	7,  // 15: trillian.TrillianAdmin.UndeleteTree:output_type -> trillian.Tree
	10, // [10:16] is the sub-list for method output_type
	4,  // [4:10] is the sub-list for method input_type
	4,  // [4:4] is the sub-list for extension type_name
	4,  // [4:4] is the sub-list for extension extendee
	0,  // [0:4] is the sub-list for field type_name
}
func init() { file_trillian_admin_api_proto_init() }

// file_trillian_admin_api_proto_init registers this file's descriptor and Go
// types with the protobuf runtime. It is idempotent: once
// File_trillian_admin_api_proto is set, subsequent calls return immediately.
func file_trillian_admin_api_proto_init() {
	if File_trillian_admin_api_proto != nil {
		return
	}
	// trillian.proto declares the Tree message referenced here, so its
	// descriptor must be registered first.
	file_trillian_proto_init()
	// Without unsafe access the runtime needs exporter closures to reach the
	// unexported state/sizeCache/unknownFields fields of each message type.
	if !protoimpl.UnsafeEnabled {
		file_trillian_admin_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListTreesRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_trillian_admin_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ListTreesResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_trillian_admin_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GetTreeRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_trillian_admin_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CreateTreeRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_trillian_admin_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UpdateTreeRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_trillian_admin_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*DeleteTreeRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_trillian_admin_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UndeleteTreeRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	// Build the file descriptor from the raw bytes plus the Go type tables
	// declared above, then clear the package-level staging variables so the
	// backing data can be garbage-collected.
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_trillian_admin_api_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   7,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_trillian_admin_api_proto_goTypes,
		DependencyIndexes: file_trillian_admin_api_proto_depIdxs,
		MessageInfos:      file_trillian_admin_api_proto_msgTypes,
	}.Build()
	File_trillian_admin_api_proto = out.File
	file_trillian_admin_api_proto_rawDesc = nil
	file_trillian_admin_api_proto_goTypes = nil
	file_trillian_admin_api_proto_depIdxs = nil
}

View File

@@ -1,107 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.google.trillian.proto";
option java_outer_classname = "TrillianAdminApiProto";
option go_package = "github.com/google/trillian";
package trillian;
import "trillian.proto";
import "google/protobuf/field_mask.proto";
// ListTrees request.
// No filters or pagination options are provided.
message ListTreesRequest {
  // If true, deleted trees are included in the response.
  // (Deleted trees are those soft-deleted via DeleteTree but not yet purged.)
  bool show_deleted = 1;
}
// ListTrees response.
// No pagination is provided, all trees the requester has access to are
// returned.
message ListTreesResponse {
  // Trees matching the list request filters.
  // Empty if no trees are visible to the requester.
  repeated Tree tree = 1;
}
// GetTree request.
message GetTreeRequest {
  // ID of the tree to retrieve.
  int64 tree_id = 1;
}
// CreateTree request.
message CreateTreeRequest {
  // Tree to be created. See Tree and CreateTree for more details.
  Tree tree = 1;

  // Field 2 (key_spec) was removed; keep both the number and the name
  // reserved so they can never be reused with different semantics.
  reserved 2;
  reserved "key_spec";
}
// UpdateTree request.
message UpdateTreeRequest {
  // Tree to be updated.
  Tree tree = 1;

  // Fields modified by the update request.
  // For example: "tree_state", "display_name", "description".
  // Only the fields named here are applied from `tree`.
  google.protobuf.FieldMask update_mask = 2;
}
// DeleteTree request.
message DeleteTreeRequest {
  // ID of the tree to delete.
  int64 tree_id = 1;
}
// UndeleteTree request.
message UndeleteTreeRequest {
  // ID of the tree to undelete.
  int64 tree_id = 1;
}
// Trillian Administrative interface.
// Allows creation and management of Trillian trees.
service TrillianAdmin {
  // Lists all trees the requester has access to.
  rpc ListTrees(ListTreesRequest) returns (ListTreesResponse) {}

  // Retrieves a tree by ID.
  rpc GetTree(GetTreeRequest) returns (Tree) {}

  // Creates a new tree.
  // System-generated fields are not required and will be ignored if present,
  // e.g.: tree_id, create_time and update_time.
  // Returns the created tree, with all system-generated fields assigned.
  rpc CreateTree(CreateTreeRequest) returns (Tree) {}

  // Updates a tree.
  // See Tree for details. Readonly fields cannot be updated.
  rpc UpdateTree(UpdateTreeRequest) returns (Tree) {}

  // Soft-deletes a tree.
  // A soft-deleted tree may be undeleted for a certain period, after which
  // it'll be permanently deleted.
  rpc DeleteTree(DeleteTreeRequest) returns (Tree) {}

  // Undeletes a soft-deleted tree.
  // A soft-deleted tree may be undeleted for a certain period, after which
  // it'll be permanently deleted.
  rpc UndeleteTree(UndeleteTreeRequest) returns (Tree) {}
}

View File

@@ -1,334 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v3.20.1
// source: trillian_admin_api.proto
package trillian
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// Fully-qualified method names for the TrillianAdmin service, as passed to
// grpc.ClientConn.Invoke and reported in grpc.UnaryServerInfo.FullMethod.
const (
	TrillianAdmin_ListTrees_FullMethodName    = "/trillian.TrillianAdmin/ListTrees"
	TrillianAdmin_GetTree_FullMethodName      = "/trillian.TrillianAdmin/GetTree"
	TrillianAdmin_CreateTree_FullMethodName   = "/trillian.TrillianAdmin/CreateTree"
	TrillianAdmin_UpdateTree_FullMethodName   = "/trillian.TrillianAdmin/UpdateTree"
	TrillianAdmin_DeleteTree_FullMethodName   = "/trillian.TrillianAdmin/DeleteTree"
	TrillianAdmin_UndeleteTree_FullMethodName = "/trillian.TrillianAdmin/UndeleteTree"
)
// TrillianAdminClient is the client API for TrillianAdmin service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type TrillianAdminClient interface {
	// Lists all trees the requester has access to.
	ListTrees(ctx context.Context, in *ListTreesRequest, opts ...grpc.CallOption) (*ListTreesResponse, error)
	// Retrieves a tree by ID.
	GetTree(ctx context.Context, in *GetTreeRequest, opts ...grpc.CallOption) (*Tree, error)
	// Creates a new tree.
	// System-generated fields are not required and will be ignored if present,
	// e.g.: tree_id, create_time and update_time.
	// Returns the created tree, with all system-generated fields assigned.
	CreateTree(ctx context.Context, in *CreateTreeRequest, opts ...grpc.CallOption) (*Tree, error)
	// Updates a tree.
	// See Tree for details. Readonly fields cannot be updated.
	UpdateTree(ctx context.Context, in *UpdateTreeRequest, opts ...grpc.CallOption) (*Tree, error)
	// Soft-deletes a tree.
	// A soft-deleted tree may be undeleted for a certain period, after which
	// it'll be permanently deleted.
	DeleteTree(ctx context.Context, in *DeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error)
	// Undeletes a soft-deleted tree.
	// A soft-deleted tree may be undeleted for a certain period, after which
	// it'll be permanently deleted.
	UndeleteTree(ctx context.Context, in *UndeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error)
}
// trillianAdminClient is the generated concrete implementation of
// TrillianAdminClient; each method performs a unary Invoke on the connection.
type trillianAdminClient struct {
	cc grpc.ClientConnInterface
}

// NewTrillianAdminClient returns a TrillianAdmin client backed by cc.
func NewTrillianAdminClient(cc grpc.ClientConnInterface) TrillianAdminClient {
	return &trillianAdminClient{cc}
}

func (c *trillianAdminClient) ListTrees(ctx context.Context, in *ListTreesRequest, opts ...grpc.CallOption) (*ListTreesResponse, error) {
	out := new(ListTreesResponse)
	err := c.cc.Invoke(ctx, TrillianAdmin_ListTrees_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *trillianAdminClient) GetTree(ctx context.Context, in *GetTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
	out := new(Tree)
	err := c.cc.Invoke(ctx, TrillianAdmin_GetTree_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *trillianAdminClient) CreateTree(ctx context.Context, in *CreateTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
	out := new(Tree)
	err := c.cc.Invoke(ctx, TrillianAdmin_CreateTree_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *trillianAdminClient) UpdateTree(ctx context.Context, in *UpdateTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
	out := new(Tree)
	err := c.cc.Invoke(ctx, TrillianAdmin_UpdateTree_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *trillianAdminClient) DeleteTree(ctx context.Context, in *DeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
	out := new(Tree)
	err := c.cc.Invoke(ctx, TrillianAdmin_DeleteTree_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *trillianAdminClient) UndeleteTree(ctx context.Context, in *UndeleteTreeRequest, opts ...grpc.CallOption) (*Tree, error) {
	out := new(Tree)
	err := c.cc.Invoke(ctx, TrillianAdmin_UndeleteTree_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// TrillianAdminServer is the server API for TrillianAdmin service.
// All implementations should embed UnimplementedTrillianAdminServer
// for forward compatibility
type TrillianAdminServer interface {
	// Lists all trees the requester has access to.
	ListTrees(context.Context, *ListTreesRequest) (*ListTreesResponse, error)
	// Retrieves a tree by ID.
	GetTree(context.Context, *GetTreeRequest) (*Tree, error)
	// Creates a new tree.
	// System-generated fields are not required and will be ignored if present,
	// e.g.: tree_id, create_time and update_time.
	// Returns the created tree, with all system-generated fields assigned.
	CreateTree(context.Context, *CreateTreeRequest) (*Tree, error)
	// Updates a tree.
	// See Tree for details. Readonly fields cannot be updated.
	UpdateTree(context.Context, *UpdateTreeRequest) (*Tree, error)
	// Soft-deletes a tree.
	// A soft-deleted tree may be undeleted for a certain period, after which
	// it'll be permanently deleted.
	DeleteTree(context.Context, *DeleteTreeRequest) (*Tree, error)
	// Undeletes a soft-deleted tree.
	// A soft-deleted tree may be undeleted for a certain period, after which
	// it'll be permanently deleted.
	UndeleteTree(context.Context, *UndeleteTreeRequest) (*Tree, error)
}
// UnimplementedTrillianAdminServer should be embedded to have forward compatible implementations.
// Every method returns codes.Unimplemented, so embedders only need to
// override the RPCs they actually support.
type UnimplementedTrillianAdminServer struct {
}

func (UnimplementedTrillianAdminServer) ListTrees(context.Context, *ListTreesRequest) (*ListTreesResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListTrees not implemented")
}
func (UnimplementedTrillianAdminServer) GetTree(context.Context, *GetTreeRequest) (*Tree, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetTree not implemented")
}
func (UnimplementedTrillianAdminServer) CreateTree(context.Context, *CreateTreeRequest) (*Tree, error) {
	return nil, status.Errorf(codes.Unimplemented, "method CreateTree not implemented")
}
func (UnimplementedTrillianAdminServer) UpdateTree(context.Context, *UpdateTreeRequest) (*Tree, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateTree not implemented")
}
func (UnimplementedTrillianAdminServer) DeleteTree(context.Context, *DeleteTreeRequest) (*Tree, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteTree not implemented")
}
func (UnimplementedTrillianAdminServer) UndeleteTree(context.Context, *UndeleteTreeRequest) (*Tree, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UndeleteTree not implemented")
}

// UnsafeTrillianAdminServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to TrillianAdminServer will
// result in compilation errors.
type UnsafeTrillianAdminServer interface {
	mustEmbedUnimplementedTrillianAdminServer()
}

// RegisterTrillianAdminServer registers srv's service descriptor with the
// given gRPC registrar (typically a *grpc.Server).
func RegisterTrillianAdminServer(s grpc.ServiceRegistrar, srv TrillianAdminServer) {
	s.RegisterService(&TrillianAdmin_ServiceDesc, srv)
}
// Each _TrillianAdmin_*_Handler below decodes the wire request, then either
// calls the server implementation directly or routes the call through the
// installed unary interceptor with the matching UnaryServerInfo.

func _TrillianAdmin_ListTrees_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListTreesRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(TrillianAdminServer).ListTrees(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: TrillianAdmin_ListTrees_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(TrillianAdminServer).ListTrees(ctx, req.(*ListTreesRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _TrillianAdmin_GetTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetTreeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(TrillianAdminServer).GetTree(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: TrillianAdmin_GetTree_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(TrillianAdminServer).GetTree(ctx, req.(*GetTreeRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _TrillianAdmin_CreateTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CreateTreeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(TrillianAdminServer).CreateTree(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: TrillianAdmin_CreateTree_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(TrillianAdminServer).CreateTree(ctx, req.(*CreateTreeRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _TrillianAdmin_UpdateTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpdateTreeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(TrillianAdminServer).UpdateTree(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: TrillianAdmin_UpdateTree_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(TrillianAdminServer).UpdateTree(ctx, req.(*UpdateTreeRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _TrillianAdmin_DeleteTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteTreeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(TrillianAdminServer).DeleteTree(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: TrillianAdmin_DeleteTree_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(TrillianAdminServer).DeleteTree(ctx, req.(*DeleteTreeRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _TrillianAdmin_UndeleteTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UndeleteTreeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(TrillianAdminServer).UndeleteTree(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: TrillianAdmin_UndeleteTree_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(TrillianAdminServer).UndeleteTree(ctx, req.(*UndeleteTreeRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// TrillianAdmin_ServiceDesc is the grpc.ServiceDesc for TrillianAdmin service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var TrillianAdmin_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "trillian.TrillianAdmin",
	HandlerType: (*TrillianAdminServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "ListTrees",
			Handler:    _TrillianAdmin_ListTrees_Handler,
		},
		{
			MethodName: "GetTree",
			Handler:    _TrillianAdmin_GetTree_Handler,
		},
		{
			MethodName: "CreateTree",
			Handler:    _TrillianAdmin_CreateTree_Handler,
		},
		{
			MethodName: "UpdateTree",
			Handler:    _TrillianAdmin_UpdateTree_Handler,
		},
		{
			MethodName: "DeleteTree",
			Handler:    _TrillianAdmin_DeleteTree_Handler,
		},
		{
			MethodName: "UndeleteTree",
			Handler:    _TrillianAdmin_UndeleteTree_Handler,
		},
	},
	// TrillianAdmin has no streaming RPCs.
	Streams:  []grpc.StreamDesc{},
	Metadata: "trillian_admin_api.proto",
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,363 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package trillian;
option go_package = "github.com/google/trillian";
option java_multiple_files = true;
option java_outer_classname = "TrillianLogApiProto";
option java_package = "com.google.trillian.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
import "trillian.proto";
// The TrillianLog service provides access to an append-only Log data structure
// as described in the [Verifiable Data
// Structures](docs/papers/VerifiableDataStructures.pdf) paper.
//
// The API supports adding new entries to the Merkle tree for a specific Log
// instance (identified by its log_id) in two modes:
//  - For a normal log, new leaf entries are queued up for subsequent
//    inclusion in the log, and the leaves are assigned consecutive leaf_index
//    values as part of that integration process.
//  - For a 'pre-ordered log', new entries have an already-defined leaf
//    ordering, and leaves are only integrated into the Merkle tree when a
//    contiguous range of leaves is available.
//
// The API also supports read operations to retrieve leaf contents, and to
// provide cryptographic proofs of leaf inclusion and of the append-only nature
// of the Log.
//
// Each API request also includes a charge_to field, which allows API users
// to provide quota identifiers that should be "charged" for each API request
// (and potentially rejected with codes.ResourceExhausted).
//
// Various operations on the API also allows for 'server skew', which can occur
// when different API requests happen to be handled by different server instances
// that may not all be up to date.  An API request that is relative to a specific
// tree size may reach a server instance that is not yet aware of this tree size;
// in this case the server will typically return an OK response that contains:
//  - a signed log root that indicates the tree size that it is aware of
//  - an empty response otherwise.
service TrillianLog {
  // QueueLeaf adds a single leaf to the queue of pending leaves for a normal
  // log.
  rpc QueueLeaf(QueueLeafRequest) returns (QueueLeafResponse) {}

  // GetInclusionProof returns an inclusion proof for a leaf with a given index
  // in a particular tree.
  //
  // If the requested tree_size is larger than the server is aware of, the
  // response will include the latest known log root and an empty proof.
  rpc GetInclusionProof(GetInclusionProofRequest)
      returns (GetInclusionProofResponse) {}

  // GetInclusionProofByHash returns an inclusion proof for any leaves that have
  // the given Merkle hash in a particular tree.
  //
  // If any of the leaves that match the given Merkle hash have a leaf index that
  // is beyond the requested tree size, the corresponding proof entry will be empty.
  rpc GetInclusionProofByHash(GetInclusionProofByHashRequest)
      returns (GetInclusionProofByHashResponse) {}

  // GetConsistencyProof returns a consistency proof between different sizes of
  // a particular tree.
  //
  // If the requested tree size is larger than the server is aware of,
  // the response will include the latest known log root and an empty proof.
  rpc GetConsistencyProof(GetConsistencyProofRequest)
      returns (GetConsistencyProofResponse) {}

  // GetLatestSignedLogRoot returns the latest log root for a given tree,
  // and optionally also includes a consistency proof from an earlier tree size
  // to the new size of the tree.
  //
  // If the earlier tree size is larger than the server is aware of,
  // an InvalidArgument error is returned.
  rpc GetLatestSignedLogRoot(GetLatestSignedLogRootRequest)
      returns (GetLatestSignedLogRootResponse) {}

  // GetEntryAndProof returns a log leaf and the corresponding inclusion proof
  // to a specified tree size, for a given leaf index in a particular tree.
  //
  // If the requested tree size is unavailable but the leaf is
  // in scope for the current tree, the returned proof will be for the
  // current tree size rather than the requested tree size.
  rpc GetEntryAndProof(GetEntryAndProofRequest)
      returns (GetEntryAndProofResponse) {}

  // InitLog initializes a particular tree, creating the initial signed log
  // root (which will be of size 0).
  rpc InitLog(InitLogRequest) returns (InitLogResponse) {}

  // AddSequencedLeaves adds a batch of leaves with assigned sequence numbers
  // to a pre-ordered log. The indices of the provided leaves must be contiguous.
  rpc AddSequencedLeaves(AddSequencedLeavesRequest)
      returns (AddSequencedLeavesResponse) {}

  // GetLeavesByRange returns a batch of leaves whose leaf indices are in a
  // sequential range.
  rpc GetLeavesByRange(GetLeavesByRangeRequest)
      returns (GetLeavesByRangeResponse) {}
}
// ChargeTo describes the user(s) associated with the request whose quota should
// be checked and charged.
message ChargeTo {
// user is a list of personality-defined strings.
// Trillian will treat them as /User/%{user}/... keys when checking and
// charging quota.
// If one or more of the specified users has insufficient quota, the
// request will be denied.
//
// As an example, a Certificate Transparency frontend might set the following
// user strings when sending a QueueLeaf request to the Trillian log:
// - The requesting IP address.
// This would limit the number of requests per IP.
// - The "intermediate-<hash>" for each of the intermediate certificates in
// the submitted chain.
// This would have the effect of limiting the rate of submissions under
// a given intermediate/root.
repeated string user = 1;
}
message QueueLeafRequest {
  // log_id identifies the log (tree) that the leaf is queued for.
  int64 log_id = 1;
  // leaf is the entry to queue; see LogLeaf for which fields clients
  // should populate on submission.
  LogLeaf leaf = 2;
  // charge_to lists quota identifiers to charge for this request (see ChargeTo).
  ChargeTo charge_to = 3;
}
message QueueLeafResponse {
  // queued_leaf describes the leaf which is or will be incorporated into the
  // Log. If the submitted leaf was already present in the Log (as indicated by
  // its leaf identity hash), then the returned leaf will be the pre-existing
  // leaf entry rather than the submitted leaf.
  // (Field number 1 is unused here, so numbering starts at 2.)
  QueuedLogLeaf queued_leaf = 2;
}
message GetInclusionProofRequest {
  // log_id identifies the log (tree) to query.
  int64 log_id = 1;
  // leaf_index is the index of the leaf whose inclusion is to be proved.
  int64 leaf_index = 2;
  // tree_size is the tree size that the inclusion proof should be relative to.
  int64 tree_size = 3;
  // charge_to lists quota identifiers to charge for this request (see ChargeTo).
  ChargeTo charge_to = 4;
}
message GetInclusionProofResponse {
// The proof field may be empty if the requested tree_size was larger
// than that available at the server (e.g. because there is skew between
// server instances, and an earlier client request was processed by a
// more up-to-date instance). In this case, the signed_log_root
// field will indicate the tree size that the server is aware of, and
// the proof field will be empty.
Proof proof = 2;
SignedLogRoot signed_log_root = 3;
}
message GetInclusionProofByHashRequest {
int64 log_id = 1;
// The leaf hash field provides the Merkle tree hash of the leaf entry
// to be retrieved.
bytes leaf_hash = 2;
int64 tree_size = 3;
bool order_by_sequence = 4;
ChargeTo charge_to = 5;
}
message GetInclusionProofByHashResponse {
// Logs can potentially contain leaves with duplicate hashes so it's possible
// for this to return multiple proofs. If the leaf index for a particular
// instance of the requested Merkle leaf hash is beyond the requested tree
// size, the corresponding proof entry will be missing.
repeated Proof proof = 2;
SignedLogRoot signed_log_root = 3;
}
message GetConsistencyProofRequest {
  // log_id identifies the log (tree) to query.
  int64 log_id = 1;
  // first_tree_size is the size of the earlier (smaller) tree.
  int64 first_tree_size = 2;
  // second_tree_size is the size of the later tree.
  int64 second_tree_size = 3;
  // charge_to lists quota identifiers to charge for this request (see ChargeTo).
  ChargeTo charge_to = 4;
}
message GetConsistencyProofResponse {
// The proof field may be empty if the requested tree_size was larger
// than that available at the server (e.g. because there is skew between
// server instances, and an earlier client request was processed by a
// more up-to-date instance). In this case, the signed_log_root
// field will indicate the tree size that the server is aware of, and
// the proof field will be empty.
Proof proof = 2;
SignedLogRoot signed_log_root = 3;
}
message GetLatestSignedLogRootRequest {
int64 log_id = 1;
ChargeTo charge_to = 2;
// If first_tree_size is non-zero, the response will include a consistency
// proof between first_tree_size and the new tree size (if not smaller).
int64 first_tree_size = 3;
}
message GetLatestSignedLogRootResponse {
SignedLogRoot signed_log_root = 2;
// proof is filled in with a consistency proof if first_tree_size in
// GetLatestSignedLogRootRequest is non-zero (and within the tree size
// available at the server).
Proof proof = 3;
}
message GetEntryAndProofRequest {
  // log_id identifies the log (tree) to query.
  int64 log_id = 1;
  // leaf_index is the index of the leaf to retrieve.
  int64 leaf_index = 2;
  // tree_size is the tree size that the inclusion proof should be relative
  // to (see GetEntryAndProof for the behavior when it is unavailable).
  int64 tree_size = 3;
  // charge_to lists quota identifiers to charge for this request (see ChargeTo).
  ChargeTo charge_to = 4;
}
message GetEntryAndProofResponse {
  // (Field number 1 is unused here, so numbering starts at 2.)
  Proof proof = 2;
  LogLeaf leaf = 3;
  SignedLogRoot signed_log_root = 4;
}
message InitLogRequest {
  // log_id identifies the log (tree) to initialize.
  int64 log_id = 1;
  // charge_to lists quota identifiers to charge for this request (see ChargeTo).
  ChargeTo charge_to = 2;
}
message InitLogResponse {
  // created holds the initial signed log root (which will be of size 0).
  SignedLogRoot created = 1;
}
message AddSequencedLeavesRequest {
  // log_id identifies the pre-ordered log (tree) to add leaves to.
  int64 log_id = 1;
  // leaves holds the entries to add; their leaf_index values must be
  // contiguous (see AddSequencedLeaves).
  repeated LogLeaf leaves = 2;
  // charge_to lists quota identifiers to charge for this request (see ChargeTo).
  // (Field number 3 is unused.)
  ChargeTo charge_to = 4;
}
message AddSequencedLeavesResponse {
  // Same number and order as in the corresponding request.
  repeated QueuedLogLeaf results = 2;
}
message GetLeavesByRangeRequest {
  // log_id identifies the log (tree) to read from.
  int64 log_id = 1;
  // start_index is the index of the first leaf to return.
  int64 start_index = 2;
  // count is the maximum number of leaves to return.
  int64 count = 3;
  // charge_to lists quota identifiers to charge for this request (see ChargeTo).
  ChargeTo charge_to = 4;
}
message GetLeavesByRangeResponse {
  // Returned log leaves starting from the `start_index` of the request, in
  // order. There may be fewer than `request.count` leaves returned, if the
  // requested range extended beyond the size of the tree or if the server opted
  // to return fewer leaves than requested.
  repeated LogLeaf leaves = 1;
  SignedLogRoot signed_log_root = 2;
}
// QueuedLogLeaf provides the result of submitting an entry to the log.
// TODO(pavelkalinnikov): Consider renaming it to AddLogLeafResult or the like.
message QueuedLogLeaf {
// The leaf as it was stored by Trillian. Empty unless `status.code` is:
// - `google.rpc.OK`: the `leaf` data is the same as in the request.
// - `google.rpc.ALREADY_EXISTS` or 'google.rpc.FAILED_PRECONDITION`: the
// `leaf` is the conflicting one already in the log.
LogLeaf leaf = 1;
// The status of adding the leaf.
// - `google.rpc.OK`: successfully added.
// - `google.rpc.ALREADY_EXISTS`: the leaf is a duplicate of an already
// existing one. Either `leaf_identity_hash` is the same in the `LOG`
// mode, or `leaf_index` in the `PREORDERED_LOG`.
// - `google.rpc.FAILED_PRECONDITION`: A conflicting entry is already
// present in the log, e.g., same `leaf_index` but different `leaf_data`.
google.rpc.Status status = 2;
}
// LogLeaf describes a leaf in the Log's Merkle tree, corresponding to a single log entry.
// Each leaf has a unique leaf index in the scope of this tree. Clients submitting new
// leaf entries should only set the following fields:
// - leaf_value
// - extra_data (optionally)
// - leaf_identity_hash (optionally)
// - leaf_index (iff the log is a PREORDERED_LOG)
message LogLeaf {
// merkle_leaf_hash holds the Merkle leaf hash over leaf_value. This is
// calculated by the Trillian server when leaves are added to the tree, using
// the defined hashing algorithm and strategy for the tree; as such, the client
// does not need to set it on leaf submissions.
bytes merkle_leaf_hash = 1;
// leaf_value holds the data that forms the value of the Merkle tree leaf.
// The client should set this field on all leaf submissions, and is
// responsible for ensuring its validity (the Trillian server treats it as an
// opaque blob).
bytes leaf_value = 2;
// extra_data holds additional data associated with the Merkle tree leaf.
// The client may set this data on leaf submissions, and the Trillian server
// will return it on subsequent read operations. However, the contents of
// this field are not covered by and do not affect the Merkle tree hash
// calculations.
bytes extra_data = 3;
// leaf_index indicates the index of this leaf in the Merkle tree.
// This field is returned on all read operations, but should only be
// set for leaf submissions in PREORDERED_LOG mode (for a normal log
// the leaf index is assigned by Trillian when the submitted leaf is
// integrated into the Merkle tree).
int64 leaf_index = 4;
// leaf_identity_hash provides a hash value that indicates the client's
// concept of which leaf entries should be considered identical.
//
// This mechanism allows the client personality to indicate that two leaves
// should be considered "duplicates" even though their `leaf_value`s differ.
//
// If this is not set on leaf submissions, the Trillian server will take its
// value to be the same as merkle_leaf_hash (and thus only leaves with
// identical leaf_value contents will be considered identical).
//
// For example, in Certificate Transparency each certificate submission is
// associated with a submission timestamp, but subsequent submissions of the
// same certificate should be considered identical. This is achieved
// by setting the leaf identity hash to a hash over (just) the certificate,
// whereas the Merkle leaf hash encompasses both the certificate and its
// submission time -- allowing duplicate certificates to be detected.
//
//
// Continuing the CT example, for a CT mirror personality (which must allow
// dupes since the source log could contain them), the part of the
// personality which fetches and submits the entries might set
// `leaf_identity_hash` to `H(leaf_index||cert)`.
//
// TODO(pavelkalinnikov): Consider instead using `H(cert)` and allowing
// identity hash dupes in `PREORDERED_LOG` mode, for it can later be
// upgraded to `LOG` which will need to correctly detect duplicates with
// older entries when new ones get queued.
bytes leaf_identity_hash = 5;
// queue_timestamp holds the time at which this leaf was queued for
// inclusion in the Log, or zero if the entry was submitted without
// queuing. Clients should not set this field on submissions.
google.protobuf.Timestamp queue_timestamp = 6;
// integrate_timestamp holds the time at which this leaf was integrated into
// the tree. Clients should not set this field on submissions.
google.protobuf.Timestamp integrate_timestamp = 7;
}

View File

@@ -1,487 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc v3.20.1
// source: trillian_log_api.proto
package trillian
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const (
TrillianLog_QueueLeaf_FullMethodName = "/trillian.TrillianLog/QueueLeaf"
TrillianLog_GetInclusionProof_FullMethodName = "/trillian.TrillianLog/GetInclusionProof"
TrillianLog_GetInclusionProofByHash_FullMethodName = "/trillian.TrillianLog/GetInclusionProofByHash"
TrillianLog_GetConsistencyProof_FullMethodName = "/trillian.TrillianLog/GetConsistencyProof"
TrillianLog_GetLatestSignedLogRoot_FullMethodName = "/trillian.TrillianLog/GetLatestSignedLogRoot"
TrillianLog_GetEntryAndProof_FullMethodName = "/trillian.TrillianLog/GetEntryAndProof"
TrillianLog_InitLog_FullMethodName = "/trillian.TrillianLog/InitLog"
TrillianLog_AddSequencedLeaves_FullMethodName = "/trillian.TrillianLog/AddSequencedLeaves"
TrillianLog_GetLeavesByRange_FullMethodName = "/trillian.TrillianLog/GetLeavesByRange"
)
// TrillianLogClient is the client API for TrillianLog service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type TrillianLogClient interface {
// QueueLeaf adds a single leaf to the queue of pending leaves for a normal
// log.
QueueLeaf(ctx context.Context, in *QueueLeafRequest, opts ...grpc.CallOption) (*QueueLeafResponse, error)
// GetInclusionProof returns an inclusion proof for a leaf with a given index
// in a particular tree.
//
// If the requested tree_size is larger than the server is aware of, the
// response will include the latest known log root and an empty proof.
GetInclusionProof(ctx context.Context, in *GetInclusionProofRequest, opts ...grpc.CallOption) (*GetInclusionProofResponse, error)
// GetInclusionProofByHash returns an inclusion proof for any leaves that have
// the given Merkle hash in a particular tree.
//
	// If any of the leaves that match the given Merkle hash have a leaf index that
// is beyond the requested tree size, the corresponding proof entry will be empty.
GetInclusionProofByHash(ctx context.Context, in *GetInclusionProofByHashRequest, opts ...grpc.CallOption) (*GetInclusionProofByHashResponse, error)
// GetConsistencyProof returns a consistency proof between different sizes of
// a particular tree.
//
// If the requested tree size is larger than the server is aware of,
// the response will include the latest known log root and an empty proof.
GetConsistencyProof(ctx context.Context, in *GetConsistencyProofRequest, opts ...grpc.CallOption) (*GetConsistencyProofResponse, error)
// GetLatestSignedLogRoot returns the latest log root for a given tree,
// and optionally also includes a consistency proof from an earlier tree size
// to the new size of the tree.
//
// If the earlier tree size is larger than the server is aware of,
// an InvalidArgument error is returned.
GetLatestSignedLogRoot(ctx context.Context, in *GetLatestSignedLogRootRequest, opts ...grpc.CallOption) (*GetLatestSignedLogRootResponse, error)
// GetEntryAndProof returns a log leaf and the corresponding inclusion proof
// to a specified tree size, for a given leaf index in a particular tree.
//
// If the requested tree size is unavailable but the leaf is
// in scope for the current tree, the returned proof will be for the
// current tree size rather than the requested tree size.
GetEntryAndProof(ctx context.Context, in *GetEntryAndProofRequest, opts ...grpc.CallOption) (*GetEntryAndProofResponse, error)
// InitLog initializes a particular tree, creating the initial signed log
// root (which will be of size 0).
InitLog(ctx context.Context, in *InitLogRequest, opts ...grpc.CallOption) (*InitLogResponse, error)
// AddSequencedLeaves adds a batch of leaves with assigned sequence numbers
// to a pre-ordered log. The indices of the provided leaves must be contiguous.
AddSequencedLeaves(ctx context.Context, in *AddSequencedLeavesRequest, opts ...grpc.CallOption) (*AddSequencedLeavesResponse, error)
// GetLeavesByRange returns a batch of leaves whose leaf indices are in a
// sequential range.
GetLeavesByRange(ctx context.Context, in *GetLeavesByRangeRequest, opts ...grpc.CallOption) (*GetLeavesByRangeResponse, error)
}
type trillianLogClient struct {
cc grpc.ClientConnInterface
}
func NewTrillianLogClient(cc grpc.ClientConnInterface) TrillianLogClient {
return &trillianLogClient{cc}
}
func (c *trillianLogClient) QueueLeaf(ctx context.Context, in *QueueLeafRequest, opts ...grpc.CallOption) (*QueueLeafResponse, error) {
out := new(QueueLeafResponse)
err := c.cc.Invoke(ctx, TrillianLog_QueueLeaf_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *trillianLogClient) GetInclusionProof(ctx context.Context, in *GetInclusionProofRequest, opts ...grpc.CallOption) (*GetInclusionProofResponse, error) {
out := new(GetInclusionProofResponse)
err := c.cc.Invoke(ctx, TrillianLog_GetInclusionProof_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *trillianLogClient) GetInclusionProofByHash(ctx context.Context, in *GetInclusionProofByHashRequest, opts ...grpc.CallOption) (*GetInclusionProofByHashResponse, error) {
out := new(GetInclusionProofByHashResponse)
err := c.cc.Invoke(ctx, TrillianLog_GetInclusionProofByHash_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *trillianLogClient) GetConsistencyProof(ctx context.Context, in *GetConsistencyProofRequest, opts ...grpc.CallOption) (*GetConsistencyProofResponse, error) {
out := new(GetConsistencyProofResponse)
err := c.cc.Invoke(ctx, TrillianLog_GetConsistencyProof_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *trillianLogClient) GetLatestSignedLogRoot(ctx context.Context, in *GetLatestSignedLogRootRequest, opts ...grpc.CallOption) (*GetLatestSignedLogRootResponse, error) {
out := new(GetLatestSignedLogRootResponse)
err := c.cc.Invoke(ctx, TrillianLog_GetLatestSignedLogRoot_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *trillianLogClient) GetEntryAndProof(ctx context.Context, in *GetEntryAndProofRequest, opts ...grpc.CallOption) (*GetEntryAndProofResponse, error) {
out := new(GetEntryAndProofResponse)
err := c.cc.Invoke(ctx, TrillianLog_GetEntryAndProof_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *trillianLogClient) InitLog(ctx context.Context, in *InitLogRequest, opts ...grpc.CallOption) (*InitLogResponse, error) {
out := new(InitLogResponse)
err := c.cc.Invoke(ctx, TrillianLog_InitLog_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *trillianLogClient) AddSequencedLeaves(ctx context.Context, in *AddSequencedLeavesRequest, opts ...grpc.CallOption) (*AddSequencedLeavesResponse, error) {
out := new(AddSequencedLeavesResponse)
err := c.cc.Invoke(ctx, TrillianLog_AddSequencedLeaves_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *trillianLogClient) GetLeavesByRange(ctx context.Context, in *GetLeavesByRangeRequest, opts ...grpc.CallOption) (*GetLeavesByRangeResponse, error) {
out := new(GetLeavesByRangeResponse)
err := c.cc.Invoke(ctx, TrillianLog_GetLeavesByRange_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// TrillianLogServer is the server API for TrillianLog service.
// All implementations should embed UnimplementedTrillianLogServer
// for forward compatibility
type TrillianLogServer interface {
// QueueLeaf adds a single leaf to the queue of pending leaves for a normal
// log.
QueueLeaf(context.Context, *QueueLeafRequest) (*QueueLeafResponse, error)
// GetInclusionProof returns an inclusion proof for a leaf with a given index
// in a particular tree.
//
// If the requested tree_size is larger than the server is aware of, the
// response will include the latest known log root and an empty proof.
GetInclusionProof(context.Context, *GetInclusionProofRequest) (*GetInclusionProofResponse, error)
// GetInclusionProofByHash returns an inclusion proof for any leaves that have
// the given Merkle hash in a particular tree.
//
	// If any of the leaves that match the given Merkle hash have a leaf index that
// is beyond the requested tree size, the corresponding proof entry will be empty.
GetInclusionProofByHash(context.Context, *GetInclusionProofByHashRequest) (*GetInclusionProofByHashResponse, error)
// GetConsistencyProof returns a consistency proof between different sizes of
// a particular tree.
//
// If the requested tree size is larger than the server is aware of,
// the response will include the latest known log root and an empty proof.
GetConsistencyProof(context.Context, *GetConsistencyProofRequest) (*GetConsistencyProofResponse, error)
// GetLatestSignedLogRoot returns the latest log root for a given tree,
// and optionally also includes a consistency proof from an earlier tree size
// to the new size of the tree.
//
// If the earlier tree size is larger than the server is aware of,
// an InvalidArgument error is returned.
GetLatestSignedLogRoot(context.Context, *GetLatestSignedLogRootRequest) (*GetLatestSignedLogRootResponse, error)
// GetEntryAndProof returns a log leaf and the corresponding inclusion proof
// to a specified tree size, for a given leaf index in a particular tree.
//
// If the requested tree size is unavailable but the leaf is
// in scope for the current tree, the returned proof will be for the
// current tree size rather than the requested tree size.
GetEntryAndProof(context.Context, *GetEntryAndProofRequest) (*GetEntryAndProofResponse, error)
// InitLog initializes a particular tree, creating the initial signed log
// root (which will be of size 0).
InitLog(context.Context, *InitLogRequest) (*InitLogResponse, error)
// AddSequencedLeaves adds a batch of leaves with assigned sequence numbers
// to a pre-ordered log. The indices of the provided leaves must be contiguous.
AddSequencedLeaves(context.Context, *AddSequencedLeavesRequest) (*AddSequencedLeavesResponse, error)
// GetLeavesByRange returns a batch of leaves whose leaf indices are in a
// sequential range.
GetLeavesByRange(context.Context, *GetLeavesByRangeRequest) (*GetLeavesByRangeResponse, error)
}
// UnimplementedTrillianLogServer should be embedded to have forward compatible implementations.
type UnimplementedTrillianLogServer struct {
}
func (UnimplementedTrillianLogServer) QueueLeaf(context.Context, *QueueLeafRequest) (*QueueLeafResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method QueueLeaf not implemented")
}
func (UnimplementedTrillianLogServer) GetInclusionProof(context.Context, *GetInclusionProofRequest) (*GetInclusionProofResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetInclusionProof not implemented")
}
func (UnimplementedTrillianLogServer) GetInclusionProofByHash(context.Context, *GetInclusionProofByHashRequest) (*GetInclusionProofByHashResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetInclusionProofByHash not implemented")
}
func (UnimplementedTrillianLogServer) GetConsistencyProof(context.Context, *GetConsistencyProofRequest) (*GetConsistencyProofResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetConsistencyProof not implemented")
}
func (UnimplementedTrillianLogServer) GetLatestSignedLogRoot(context.Context, *GetLatestSignedLogRootRequest) (*GetLatestSignedLogRootResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetLatestSignedLogRoot not implemented")
}
func (UnimplementedTrillianLogServer) GetEntryAndProof(context.Context, *GetEntryAndProofRequest) (*GetEntryAndProofResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetEntryAndProof not implemented")
}
func (UnimplementedTrillianLogServer) InitLog(context.Context, *InitLogRequest) (*InitLogResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method InitLog not implemented")
}
func (UnimplementedTrillianLogServer) AddSequencedLeaves(context.Context, *AddSequencedLeavesRequest) (*AddSequencedLeavesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AddSequencedLeaves not implemented")
}
func (UnimplementedTrillianLogServer) GetLeavesByRange(context.Context, *GetLeavesByRangeRequest) (*GetLeavesByRangeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetLeavesByRange not implemented")
}
// UnsafeTrillianLogServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to TrillianLogServer will
// result in compilation errors.
type UnsafeTrillianLogServer interface {
mustEmbedUnimplementedTrillianLogServer()
}
func RegisterTrillianLogServer(s grpc.ServiceRegistrar, srv TrillianLogServer) {
s.RegisterService(&TrillianLog_ServiceDesc, srv)
}
func _TrillianLog_QueueLeaf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueueLeafRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).QueueLeaf(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_QueueLeaf_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).QueueLeaf(ctx, req.(*QueueLeafRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TrillianLog_GetInclusionProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetInclusionProofRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).GetInclusionProof(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_GetInclusionProof_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetInclusionProof(ctx, req.(*GetInclusionProofRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TrillianLog_GetInclusionProofByHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetInclusionProofByHashRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).GetInclusionProofByHash(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_GetInclusionProofByHash_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetInclusionProofByHash(ctx, req.(*GetInclusionProofByHashRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TrillianLog_GetConsistencyProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetConsistencyProofRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).GetConsistencyProof(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_GetConsistencyProof_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetConsistencyProof(ctx, req.(*GetConsistencyProofRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TrillianLog_GetLatestSignedLogRoot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetLatestSignedLogRootRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).GetLatestSignedLogRoot(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_GetLatestSignedLogRoot_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetLatestSignedLogRoot(ctx, req.(*GetLatestSignedLogRootRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TrillianLog_GetEntryAndProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetEntryAndProofRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).GetEntryAndProof(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_GetEntryAndProof_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetEntryAndProof(ctx, req.(*GetEntryAndProofRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TrillianLog_InitLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InitLogRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).InitLog(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_InitLog_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).InitLog(ctx, req.(*InitLogRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TrillianLog_AddSequencedLeaves_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AddSequencedLeavesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).AddSequencedLeaves(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_AddSequencedLeaves_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).AddSequencedLeaves(ctx, req.(*AddSequencedLeavesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _TrillianLog_GetLeavesByRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetLeavesByRangeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TrillianLogServer).GetLeavesByRange(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: TrillianLog_GetLeavesByRange_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TrillianLogServer).GetLeavesByRange(ctx, req.(*GetLeavesByRangeRequest))
}
return interceptor(ctx, in, info, handler)
}
// TrillianLog_ServiceDesc is the grpc.ServiceDesc for TrillianLog service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var TrillianLog_ServiceDesc = grpc.ServiceDesc{
ServiceName: "trillian.TrillianLog",
HandlerType: (*TrillianLogServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "QueueLeaf",
Handler: _TrillianLog_QueueLeaf_Handler,
},
{
MethodName: "GetInclusionProof",
Handler: _TrillianLog_GetInclusionProof_Handler,
},
{
MethodName: "GetInclusionProofByHash",
Handler: _TrillianLog_GetInclusionProofByHash_Handler,
},
{
MethodName: "GetConsistencyProof",
Handler: _TrillianLog_GetConsistencyProof_Handler,
},
{
MethodName: "GetLatestSignedLogRoot",
Handler: _TrillianLog_GetLatestSignedLogRoot_Handler,
},
{
MethodName: "GetEntryAndProof",
Handler: _TrillianLog_GetEntryAndProof_Handler,
},
{
MethodName: "InitLog",
Handler: _TrillianLog_InitLog_Handler,
},
{
MethodName: "AddSequencedLeaves",
Handler: _TrillianLog_AddSequencedLeaves_Handler,
},
{
MethodName: "GetLeavesByRange",
Handler: _TrillianLog_GetLeavesByRange_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "trillian_log_api.proto",
}

View File

@@ -1,713 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tls implements functionality for dealing with TLS-encoded data,
// as defined in RFC 5246. This includes parsing and generation of TLS-encoded
// data, together with utility functions for dealing with the DigitallySigned
// TLS type.
// N.B. This is copied from https://github.com/google/certificate-transparency-go/tree/master/tls
// - DO NOT MAKE CHANGES TO THIS FILE except to sync to the latest from ct-go.
package tls
import (
"bytes"
"encoding/binary"
"fmt"
"reflect"
"strconv"
"strings"
)
// This file holds utility functions for TLS encoding/decoding data
// as per RFC 5246 section 4.
// structuralError reports TLS data that is well-formed on the wire but
// incompatible with the Go type it is being decoded into (or encoded from).
type structuralError struct {
	field string // name of the offending field, "" if unknown
	msg   string // human-readable description
}

// Error implements the error interface, prefixing the message with the
// field name when one is known.
func (e structuralError) Error() string {
	if e.field == "" {
		return "tls: structure error: " + e.msg
	}
	return "tls: structure error: " + e.field + ": " + e.msg
}
// syntaxError reports TLS-encoded data that is itself malformed
// (e.g. truncated or out of range).
type syntaxError struct {
	field string // name of the offending field, "" if unknown
	msg   string // human-readable description
}

// Error implements the error interface, prefixing the message with the
// field name when one is known.
func (e syntaxError) Error() string {
	if e.field == "" {
		return "tls: syntax error: " + e.msg
	}
	return "tls: syntax error: " + e.field + ": " + e.msg
}
// Uint24 is an unsigned 3-byte integer.
// It is stored in a uint32; values above 0xffffff are rejected at encode time.
type Uint24 uint32

// Enum is an unsigned integer.
// Its wire width is not fixed; it is taken from the field's
// `tls:"size:S"` or `tls:"maxval:N"` tag.
type Enum uint64

// Reflection sentinels for the fixed-size TLS integer kinds; parseField and
// marshalField compare a value's reflect.Type/Kind against these to choose
// the correct wire encoding.
var (
	uint8Type  = reflect.TypeOf(uint8(0))
	uint16Type = reflect.TypeOf(uint16(0))
	uint24Type = reflect.TypeOf(Uint24(0))
	uint32Type = reflect.TypeOf(uint32(0))
	uint64Type = reflect.TypeOf(uint64(0))
	enumType   = reflect.TypeOf(Enum(0))
)
// Unmarshal parses the TLS-encoded data in b and uses the reflect package to
// fill in an arbitrary value pointed at by val. Because Unmarshal uses the
// reflect package, the structs being written to must use exported fields
// (upper case names).
//
// The mappings between TLS types and Go types is as follows; some fields
// must have tags (to indicate their encoded size).
//
//	TLS		Go	Required Tags
//	opaque		byte / uint8
//	uint8		byte / uint8
//	uint16		uint16
//	uint24		tls.Uint24
//	uint32		uint32
//	uint64		uint64
//	enum		tls.Enum	size:S or maxval:N
//	Type<N,M>	[]Type		minlen:N,maxlen:M
//	opaque[N]	[N]byte / [N]uint8
//	uint8[N]	[N]byte / [N]uint8
//	struct { }	struct { }
//	select(T) {
//	 case e1: Type	*T		selector:Field,val:e1
//	}
//
// TLS variants (RFC 5246 s4.6.1) are only supported when the value of the
// associated enumeration type is available earlier in the same enclosing
// struct, and each possible variant is marked with a selector tag (to
// indicate which field selects the variants) and a val tag (to indicate
// what value of the selector picks this particular field).
//
// For example, a TLS structure:
//
//	enum { e1(1), e2(2) } EnumType;
//	struct {
//	   EnumType sel;
//	   select(sel) {
//	      case e1: uint16
//	      case e2: uint32
//	   } data;
//	} VariantItem;
//
// would have a corresponding Go type:
//
//	type VariantItem struct {
//	   Sel    tls.Enum  `tls:"maxval:2"`
//	   Data16 *uint16   `tls:"selector:Sel,val:1"`
//	   Data32 *uint32   `tls:"selector:Sel,val:2"`
//	}
//
// TLS fixed-length vectors of types other than opaque or uint8 are not supported.
//
// For TLS variable-length vectors that are themselves used in other vectors,
// create a single-field structure to represent the inner type. For example, for:
//
//	opaque InnerType<1..65535>;
//	struct {
//	  InnerType inners<1,65535>;
//	} Something;
//
// convert to:
//
//	type InnerType struct {
//	   Val []byte `tls:"minlen:1,maxlen:65535"`
//	}
//	type Something struct {
//	   Inners []InnerType `tls:"minlen:1,maxlen:65535"`
//	}
//
// If the encoded value does not fit in the Go type, Unmarshal returns a parse error.
//
// Unmarshal is shorthand for UnmarshalWithParams with no top-level field
// parameters; it returns any bytes left over after val has been consumed.
func Unmarshal(b []byte, val interface{}) ([]byte, error) {
	return UnmarshalWithParams(b, val, "")
}
// UnmarshalWithParams behaves like Unmarshal but additionally applies the
// given field parameters to the top-level element; params uses the same
// syntax as a `tls:"..."` struct tag. It returns the bytes left over after
// the value has been consumed.
func UnmarshalWithParams(b []byte, val interface{}, params string) ([]byte, error) {
	info, err := fieldTagToFieldInfo(params, "")
	if err != nil {
		return nil, err
	}
	// val must be a pointer so the decoded value can be written back;
	// dereference it to obtain an addressable reflect.Value for parseField
	// to introspect and populate.
	target := reflect.ValueOf(val).Elem()
	consumed, err := parseField(target, b, 0, info)
	if err != nil {
		return nil, err
	}
	// Hand back whatever trailing bytes were not consumed.
	return b[consumed:], nil
}
// byteCount returns how many bytes are needed to encode every value up to
// (and including) x as a big-endian unsigned integer; the result is in [1, 8].
func byteCount(x uint64) uint {
	n := uint(1)
	// Strip one byte per iteration until the remainder fits in a single byte.
	for x > 0xff {
		x >>= 8
		n++
	}
	return n
}
// fieldInfo captures the TLS encoding parameters parsed from a struct
// field's `tls:"..."` tag (or from top-level params).
type fieldInfo struct {
	count    uint // Number of bytes
	countSet bool
	minlen   uint64 // Only relevant for slices
	maxlen   uint64 // Only relevant for slices
	selector string // Only relevant for select sub-values
	val      uint64 // Only relevant for select sub-values
	name     string // Used for better error messages
}

// fieldName returns the field's name for use in error messages. It is safe
// to call on a nil *fieldInfo, in which case it yields "".
func (i *fieldInfo) fieldName() string {
	if i != nil {
		return i.name
	}
	return ""
}
// fieldTagToFieldInfo parses a `tls:"..."` tag string into a fieldInfo
// describing the field named name.
//
// NOTE(review): clause handling is order-dependent and deliberately lax —
// clauses that fail to parse are silently skipped, and "maxval:"/"size:"
// each REPLACE the whole info accumulated so far, whereas the other clauses
// merge into it. Preserve this exact precedence when editing.
func fieldTagToFieldInfo(str string, name string) (*fieldInfo, error) {
	var info *fieldInfo
	// Iterate over clauses in the tag, ignoring any that don't parse properly.
	for _, part := range strings.Split(str, ",") {
		switch {
		case strings.HasPrefix(part, "maxval:"):
			// Wire width derived from the largest permitted value.
			if v, err := strconv.ParseUint(part[7:], 10, 64); err == nil {
				info = &fieldInfo{count: byteCount(v), countSet: true}
			}
		case strings.HasPrefix(part, "size:"):
			// Explicit wire width in bytes.
			if sz, err := strconv.ParseUint(part[5:], 10, 32); err == nil {
				info = &fieldInfo{count: uint(sz), countSet: true}
			}
		case strings.HasPrefix(part, "maxlen:"):
			// Upper bound for a variable-length vector; also fixes the width
			// of its length prefix.
			v, err := strconv.ParseUint(part[7:], 10, 64)
			if err != nil {
				continue
			}
			if info == nil {
				info = &fieldInfo{}
			}
			info.count = byteCount(v)
			info.countSet = true
			info.maxlen = v
		case strings.HasPrefix(part, "minlen:"):
			// Lower bound for a variable-length vector.
			v, err := strconv.ParseUint(part[7:], 10, 64)
			if err != nil {
				continue
			}
			if info == nil {
				info = &fieldInfo{}
			}
			info.minlen = v
		case strings.HasPrefix(part, "selector:"):
			// Name of the enum field that selects this variant.
			if info == nil {
				info = &fieldInfo{}
			}
			info.selector = part[9:]
		case strings.HasPrefix(part, "val:"):
			// Selector value for which this variant field is chosen.
			v, err := strconv.ParseUint(part[4:], 10, 64)
			if err != nil {
				continue
			}
			if info == nil {
				info = &fieldInfo{}
			}
			info.val = v
		}
	}
	if info != nil {
		info.name = name
		// Consistency checks only apply to non-variant fields; variant
		// destinations get their size information from the pointed-to type.
		if info.selector == "" {
			if info.count < 1 {
				return nil, structuralError{name, "field of unknown size in " + str}
			} else if info.count > 8 {
				return nil, structuralError{name, "specified size too large in " + str}
			} else if info.minlen > info.maxlen {
				return nil, structuralError{name, "specified length range inverted in " + str}
			} else if info.val > 0 {
				return nil, structuralError{name, "specified selector value but not field in " + str}
			}
		}
	} else if name != "" {
		// No recognized clauses, but still record the name for error messages.
		info = &fieldInfo{name: name}
	}
	return info, nil
}
// check verifies that val fits in the field's wire size (i.count bytes) and,
// for length-prefixed vectors, lies within the [minlen, maxlen] bounds from
// the field tag. fldName is used only for error messages.
func (i fieldInfo) check(val uint64, fldName string) error {
	// When count == 8 any uint64 fits, so skip the size test. The guard also
	// avoids the non-constant shift 1<<64: in Go that wraps to 0, so the
	// original unguarded `val >= (1 << (8 * i.count))` wrongly rejected every
	// value of an 8-byte field.
	if i.count < 8 && val >= (1<<(8*i.count)) {
		return structuralError{fldName, fmt.Sprintf("value %d too large for size", val)}
	}
	// maxlen == 0 means no vector bounds were declared.
	if i.maxlen != 0 {
		if val < i.minlen {
			return structuralError{fldName, fmt.Sprintf("value %d too small for minimum %d", val, i.minlen)}
		}
		if val > i.maxlen {
			return structuralError{fldName, fmt.Sprintf("value %d too large for maximum %d", val, i.maxlen)}
		}
	}
	return nil
}
// readVarUint decodes a big-endian unsigned integer whose width in bytes is
// given by info.count, then validates it against info's declared bounds.
func readVarUint(data []byte, info *fieldInfo) (uint64, error) {
	// The width must have been established by a field tag.
	if info == nil || !info.countSet {
		return 0, structuralError{info.fieldName(), "no field size information available"}
	}
	if uint(len(data)) < info.count {
		return 0, syntaxError{info.fieldName(), "truncated variable-length integer"}
	}
	var value uint64
	for _, b := range data[:info.count] {
		value = value<<8 | uint64(b)
	}
	if err := info.check(value, info.name); err != nil {
		return 0, err
	}
	return value, nil
}
// parseField is the main parsing function. Given a byte slice and an offset
// (in bytes) into the data, it will try to parse a suitable TLS value out
// and store it in the given Value, returning the offset just past the bytes
// consumed. (The original comment said "ASN.1"; this package decodes TLS
// presentation-language values, RFC 5246 s4.)
func parseField(v reflect.Value, data []byte, initOffset int, info *fieldInfo) (int, error) {
	offset := initOffset
	rest := data[offset:]

	fieldType := v.Type()
	// First look for known fixed types.
	switch fieldType {
	case uint8Type:
		if len(rest) < 1 {
			return offset, syntaxError{info.fieldName(), "truncated uint8"}
		}
		v.SetUint(uint64(rest[0]))
		offset++
		return offset, nil
	case uint16Type:
		if len(rest) < 2 {
			return offset, syntaxError{info.fieldName(), "truncated uint16"}
		}
		v.SetUint(uint64(binary.BigEndian.Uint16(rest)))
		offset += 2
		return offset, nil
	case uint24Type:
		if len(rest) < 3 {
			return offset, syntaxError{info.fieldName(), "truncated uint24"}
		}
		// BUG FIX: read from rest, not data. The previous code indexed
		// data[0..2], which silently decoded the first three bytes of the
		// whole buffer whenever offset != 0 (e.g. a uint24 that is not the
		// first field of a struct). Every other case here reads rest.
		v.SetUint(uint64(rest[0])<<16 | uint64(rest[1])<<8 | uint64(rest[2]))
		offset += 3
		return offset, nil
	case uint32Type:
		if len(rest) < 4 {
			return offset, syntaxError{info.fieldName(), "truncated uint32"}
		}
		v.SetUint(uint64(binary.BigEndian.Uint32(rest)))
		offset += 4
		return offset, nil
	case uint64Type:
		if len(rest) < 8 {
			return offset, syntaxError{info.fieldName(), "truncated uint64"}
		}
		v.SetUint(uint64(binary.BigEndian.Uint64(rest)))
		offset += 8
		return offset, nil
	}

	// Now deal with user-defined types.
	switch v.Kind() {
	case enumType.Kind():
		// Assume that anything of the same kind as Enum is an Enum, so that
		// users can alias types of their own to Enum.
		val, err := readVarUint(rest, info)
		if err != nil {
			return offset, err
		}
		v.SetUint(val)
		offset += int(info.count)
		return offset, nil
	case reflect.Struct:
		structType := fieldType

		// TLS includes a select(Enum) {..} construct, where the value of an enum
		// indicates which variant field is present (like a C union). We require
		// that the enum value be an earlier field in the same structure (the selector),
		// and that each of the possible variant destination fields be pointers.
		// So the Go mapping looks like:
		//   type variantType struct {
		//     Which  tls.Enum  `tls:"size:1"`                // this is the selector
		//     Val1   *type1    `tls:"selector:Which,val:1"`  // this is a destination
		//     Val2   *type2    `tls:"selector:Which,val:1"`  // this is a destination
		//   }

		// To deal with this, we track any enum-like fields and their values...
		enums := make(map[string]uint64)
		// .. and we track which selector names we've seen (in the destination field tags),
		// and whether a destination for that selector has been chosen.
		selectorSeen := make(map[string]bool)
		for i := 0; i < structType.NumField(); i++ {
			// Find information about this field.
			tag := structType.Field(i).Tag.Get("tls")
			fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
			if err != nil {
				return offset, err
			}

			destination := v.Field(i)
			if fieldInfo.selector != "" {
				// This is a possible select(Enum) destination, so first check that the referenced
				// selector field has already been seen earlier in the struct.
				choice, ok := enums[fieldInfo.selector]
				if !ok {
					return offset, structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
				}
				if structType.Field(i).Type.Kind() != reflect.Ptr {
					return offset, structuralError{fieldInfo.name, "choice field not a pointer type"}
				}
				// Is this the first mention of the selector field name? If so, remember it.
				seen, ok := selectorSeen[fieldInfo.selector]
				if !ok {
					selectorSeen[fieldInfo.selector] = false
				}
				if choice != fieldInfo.val {
					// This destination field was not the chosen one, so make it nil (we checked
					// it was a pointer above).
					v.Field(i).Set(reflect.Zero(structType.Field(i).Type))
					continue
				}
				if seen {
					// We already saw a different destination field receive the value for this
					// selector value, which indicates a badly annotated structure.
					return offset, structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
				}
				selectorSeen[fieldInfo.selector] = true
				// Make an object of the pointed-to type and parse into that.
				v.Field(i).Set(reflect.New(structType.Field(i).Type.Elem()))
				destination = v.Field(i).Elem()
			}
			offset, err = parseField(destination, data, offset, fieldInfo)
			if err != nil {
				return offset, err
			}

			// Remember any possible tls.Enum values encountered in case they are selectors.
			if structType.Field(i).Type.Kind() == enumType.Kind() {
				enums[structType.Field(i).Name] = v.Field(i).Uint()
			}
		}

		// Now we have seen all fields in the structure, check that all select(Enum) {..} selector
		// fields found a destination to put their data in.
		for selector, seen := range selectorSeen {
			if !seen {
				return offset, syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
			}
		}
		return offset, nil
	case reflect.Array:
		datalen := v.Len()

		if datalen > len(rest) {
			return offset, syntaxError{info.fieldName(), "truncated array"}
		}
		inner := rest[:datalen]
		offset += datalen

		if fieldType.Elem().Kind() != reflect.Uint8 {
			// Only byte/uint8 arrays are supported
			return offset, structuralError{info.fieldName(), "unsupported array type: " + v.Type().String()}
		}
		reflect.Copy(v, reflect.ValueOf(inner))
		return offset, nil

	case reflect.Slice:
		sliceType := fieldType

		// Slices represent variable-length vectors, which are prefixed by a length field.
		// The fieldInfo indicates the size of that length field.
		varlen, err := readVarUint(rest, info)
		if err != nil {
			return offset, err
		}
		datalen := int(varlen)
		offset += int(info.count)
		rest = rest[info.count:]

		if datalen > len(rest) {
			return offset, syntaxError{info.fieldName(), "truncated slice"}
		}
		inner := rest[:datalen]
		offset += datalen

		if fieldType.Elem().Kind() == reflect.Uint8 {
			// Fast version for []byte
			v.Set(reflect.MakeSlice(sliceType, datalen, datalen))
			reflect.Copy(v, reflect.ValueOf(inner))
			return offset, nil
		}

		v.Set(reflect.MakeSlice(sliceType, 0, datalen))
		single := reflect.New(sliceType.Elem())
		for innerOffset := 0; innerOffset < len(inner); {
			var err error
			innerOffset, err = parseField(single.Elem(), inner, innerOffset, nil)
			if err != nil {
				return offset, err
			}
			v.Set(reflect.Append(v, single.Elem()))
		}
		return offset, nil
	default:
		return offset, structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
	}
}
// Marshal returns the TLS encoding of val.
// It is shorthand for MarshalWithParams with no top-level field parameters;
// see Unmarshal for the mapping between TLS and Go types.
func Marshal(val interface{}) ([]byte, error) {
	return MarshalWithParams(val, "")
}
// MarshalWithParams returns the TLS encoding of val, and allows field
// parameters to be specified for the top-level element. The form
// of the params is the same as the field tags.
func MarshalWithParams(val interface{}, params string) ([]byte, error) {
	info, err := fieldTagToFieldInfo(params, "")
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	v := reflect.ValueOf(val)
	if err := marshalField(&out, v, info); err != nil {
		return nil, err
	}
	// err is necessarily nil at this point; return an explicit nil instead
	// of the stale variable (the previous `return out.Bytes(), err` was
	// misleading, though behaviorally identical).
	return out.Bytes(), nil
}
// marshalField TLS-encodes the value v into out, using the encoding
// parameters in info. info may be nil for types whose wire size is implicit
// (fixed-size integers, structs, fixed arrays); calling fieldName on a nil
// *fieldInfo is safe and yields "".
func marshalField(out *bytes.Buffer, v reflect.Value, info *fieldInfo) error {
	var prefix string
	if info != nil && len(info.name) > 0 {
		prefix = info.name + ": "
	}

	fieldType := v.Type()
	// First look for known fixed types.
	switch fieldType {
	case uint8Type:
		out.WriteByte(byte(v.Uint()))
		return nil
	case uint16Type:
		scratch := make([]byte, 2)
		binary.BigEndian.PutUint16(scratch, uint16(v.Uint()))
		out.Write(scratch)
		return nil
	case uint24Type:
		i := v.Uint()
		if i > 0xffffff {
			return structuralError{info.fieldName(), fmt.Sprintf("uint24 overflow %d", i)}
		}
		// Encode as 4 big-endian bytes, then drop the leading zero byte.
		scratch := make([]byte, 4)
		binary.BigEndian.PutUint32(scratch, uint32(i))
		out.Write(scratch[1:])
		return nil
	case uint32Type:
		scratch := make([]byte, 4)
		binary.BigEndian.PutUint32(scratch, uint32(v.Uint()))
		out.Write(scratch)
		return nil
	case uint64Type:
		scratch := make([]byte, 8)
		binary.BigEndian.PutUint64(scratch, uint64(v.Uint()))
		out.Write(scratch)
		return nil
	}

	// Now deal with user-defined types.
	switch v.Kind() {
	case enumType.Kind():
		i := v.Uint()
		if info == nil {
			// fieldName() tolerates the nil receiver here.
			return structuralError{info.fieldName(), "enum field tag missing"}
		}
		if err := info.check(i, prefix); err != nil {
			return err
		}
		// Emit only the low info.count bytes of the big-endian encoding.
		scratch := make([]byte, 8)
		binary.BigEndian.PutUint64(scratch, uint64(i))
		out.Write(scratch[(8 - info.count):])
		return nil
	case reflect.Struct:
		structType := fieldType

		enums := make(map[string]uint64) // Values of any Enum fields
		// The comment on parseField() describes the mapping of the TLS select(Enum) {..} construct;
		// here we have selector and source (rather than destination) fields.

		// Track which selector names we've seen (in the source field tags), and whether a source
		// value for that selector has been processed.
		selectorSeen := make(map[string]bool)
		for i := 0; i < structType.NumField(); i++ {
			// Find information about this field.
			tag := structType.Field(i).Tag.Get("tls")
			fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
			if err != nil {
				return err
			}

			source := v.Field(i)
			if fieldInfo.selector != "" {
				// This field is a possible source for a select(Enum) {..}. First check
				// the selector field name has been seen.
				choice, ok := enums[fieldInfo.selector]
				if !ok {
					return structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
				}
				if structType.Field(i).Type.Kind() != reflect.Ptr {
					return structuralError{fieldInfo.name, "choice field not a pointer type"}
				}
				// Is this the first mention of the selector field name? If so, remember it.
				seen, ok := selectorSeen[fieldInfo.selector]
				if !ok {
					selectorSeen[fieldInfo.selector] = false
				}
				if choice != fieldInfo.val {
					// This source was not chosen; police that it should be nil.
					if v.Field(i).Pointer() != uintptr(0) {
						return structuralError{fieldInfo.name, "unchosen field is non-nil"}
					}
					continue
				}
				if seen {
					// We already saw a different source field generate the value for this
					// selector value, which indicates a badly annotated structure.
					return structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
				}
				selectorSeen[fieldInfo.selector] = true
				if v.Field(i).Pointer() == uintptr(0) {
					return structuralError{fieldInfo.name, "chosen field is nil"}
				}
				// Marshal from the pointed-to source object.
				source = v.Field(i).Elem()
			}
			var fieldData bytes.Buffer
			if err := marshalField(&fieldData, source, fieldInfo); err != nil {
				return err
			}
			out.Write(fieldData.Bytes())

			// Remember any tls.Enum values encountered in case they are selectors.
			if structType.Field(i).Type.Kind() == enumType.Kind() {
				enums[structType.Field(i).Name] = v.Field(i).Uint()
			}
		}
		// Now we have seen all fields in the structure, check that all select(Enum) {..} selector
		// fields found a source field to get their data from.
		for selector, seen := range selectorSeen {
			if !seen {
				return syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
			}
		}
		return nil
	case reflect.Array:
		datalen := v.Len()

		arrayType := fieldType
		if arrayType.Elem().Kind() != reflect.Uint8 {
			// Only byte/uint8 arrays are supported
			return structuralError{info.fieldName(), "unsupported array type"}
		}
		bytes := make([]byte, datalen)
		for i := 0; i < datalen; i++ {
			bytes[i] = uint8(v.Index(i).Uint())
		}
		_, err := out.Write(bytes)
		return err

	case reflect.Slice:
		if info == nil {
			// fieldName() tolerates the nil receiver here.
			return structuralError{info.fieldName(), "slice field tag missing"}
		}

		sliceType := fieldType
		if sliceType.Elem().Kind() == reflect.Uint8 {
			// Fast version for []byte: first write the length as info.count bytes.
			datalen := v.Len()
			scratch := make([]byte, 8)
			binary.BigEndian.PutUint64(scratch, uint64(datalen))
			out.Write(scratch[(8 - info.count):])

			if err := info.check(uint64(datalen), prefix); err != nil {
				return err
			}
			// Then just write the data.
			bytes := make([]byte, datalen)
			for i := 0; i < datalen; i++ {
				bytes[i] = uint8(v.Index(i).Uint())
			}
			_, err := out.Write(bytes)
			return err
		}
		// General version: use a separate Buffer to write the slice entries into.
		var innerBuf bytes.Buffer
		for i := 0; i < v.Len(); i++ {
			if err := marshalField(&innerBuf, v.Index(i), nil); err != nil {
				return err
			}
		}

		// Now insert (and check) the size.
		size := uint64(innerBuf.Len())
		if err := info.check(size, prefix); err != nil {
			return err
		}
		scratch := make([]byte, 8)
		binary.BigEndian.PutUint64(scratch, size)
		out.Write(scratch[(8 - info.count):])

		// Then copy the data.
		_, err := out.Write(innerBuf.Bytes())
		return err

	default:
		return structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
	}
}

View File

@@ -1,102 +0,0 @@
// Copyright 2018 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package types defines serialization and parsing functions for SignedLogRoot
// fields.
package types
import (
"encoding/binary"
"fmt"
"github.com/google/trillian/types/internal/tls"
"github.com/google/trillian"
)
// LogRootV1 holds the TLS-deserialization of the following structure
// (described in RFC5246 section 4 notation):
//
//	struct {
//	  uint64 tree_size;
//	  opaque root_hash<0..128>;
//	  uint64 timestamp_nanos;
//	  uint64 revision;
//	  opaque metadata<0..65535>;
//	} LogRootV1;
//
// The `tls` struct tags below carry the vector bounds used during
// (de)serialization by the internal tls package.
type LogRootV1 struct {
	// TreeSize is the number of leaves in the log Merkle tree.
	TreeSize uint64
	// RootHash is the hash of the root node of the tree.
	RootHash []byte `tls:"minlen:0,maxlen:128"`
	// TimestampNanos is the time in nanoseconds for when this root was created,
	// counting from the UNIX epoch.
	TimestampNanos uint64

	// Revision is the Merkle tree revision associated with this root.
	//
	// Deprecated: Revision is a concept internal to the storage layer.
	Revision uint64

	// Metadata holds additional data associated with this root.
	Metadata []byte `tls:"minlen:0,maxlen:65535"`
}
// LogRoot holds the TLS-deserialization of the following structure
// (described in RFC5246 section 4 notation):
//
//	enum { v1(1), (65535)} Version;
//
//	struct {
//	  Version version;
//	  select(version) {
//	    case v1: LogRootV1;
//	  }
//	} LogRoot;
type LogRoot struct {
	// Version selects which variant follows on the wire (2-byte enum).
	Version tls.Enum `tls:"size:2"`
	// V1 is populated when Version == 1, per the selector tag.
	V1 *LogRootV1 `tls:"selector:Version,val:1"`
}
// UnmarshalBinary verifies that logRootBytes is a TLS serialized LogRoot, has
// the LOG_ROOT_FORMAT_V1 tag, and populates the caller with the deserialized
// *LogRootV1.
func (l *LogRootV1) UnmarshalBinary(logRootBytes []byte) error {
	// Need at least the 2-byte version tag plus some payload.
	if len(logRootBytes) < 3 {
		return fmt.Errorf("logRootBytes too short")
	}
	if l == nil {
		return fmt.Errorf("nil log root")
	}
	// Reject anything other than the v1 format tag before decoding.
	gotVersion := binary.BigEndian.Uint16(logRootBytes)
	if gotVersion != uint16(trillian.LogRootFormat_LOG_ROOT_FORMAT_V1) {
		return fmt.Errorf("invalid LogRoot.Version: %v, want %v",
			gotVersion, trillian.LogRootFormat_LOG_ROOT_FORMAT_V1)
	}
	var root LogRoot
	if _, err := tls.Unmarshal(logRootBytes, &root); err != nil {
		return err
	}
	// The version check above guarantees the v1 variant was selected.
	*l = *root.V1
	return nil
}
// MarshalBinary returns a canonical TLS serialization of LogRoot.
func (l *LogRootV1) MarshalBinary() ([]byte, error) {
	// Wrap the v1 payload in the versioned LogRoot envelope before encoding.
	root := LogRoot{
		Version: tls.Enum(trillian.LogRootFormat_LOG_ROOT_FORMAT_V1),
		V1:      l,
	}
	return tls.Marshal(root)
}