Merge pull request #57160 from jpbetz/etcd-client-3.2.11
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).
Version bump to etcd v3.2.11, grpc v1.7.5
Fixes https://github.com/kubernetes/kubernetes/issues/56114: update to etcd client 3.2.11.
Version bumps:
- etcd from 3.1.10 to 3.2.11
- grpc from 1.3.0 to 1.7.5
- grpc-gateway from v1.1.0-25-g84398b9 to v1.3.0
TODO:
- [x] Apply the etcd [3.2 client upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md)
- [x] Apply the grpc API changes from the 1.6.0 and 1.7.0 [release notes](https://github.com/grpc/grpc-go/releases) (a sketch of one such change follows this list)
- [x] bbolt was pulled in transitively. Why? We have tests that embed etcd, so we must vendor the etcd server and all of its dependencies.
- [x] Upgrade to containerd v1.0.0? Kubernetes currently depends on containerd v1.0.0-beta.2-159-g27d450a0, which depends on grpc v1.3.0, while containerd v1.0.0 depends on grpc 1.7.2. Not needed: the containerd grpc upgrade required [no code changes](ce3e32680d).
- [x] Fix all failing tests
- [x] Ensure we can safely upgrade grpc to 1.7.5 given that docker and cAdvisor still depend on grpc 1.3.0 (both in the versions we vendor and on master for both projects). Should we hold off on this change until there is a docker release that uses grpc 1.7.x?
- [x] Wait for grpc 1.7.5 to be released (it will include https://github.com/grpc/grpc-go/pull/1747). Once released, bump grpc version in this PR and remove workarounds in `hack/godep-save.sh`.
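As a concrete reference for the grpc 1.6/1.7 migration item above, here is a minimal Go sketch (not code from this PR) of the style of change those release notes call for: error inspection moves off the deprecated `grpc.Code`/`grpc.ErrorDesc` helpers and onto the `status` package. The `errorIsRetryable` helper and its retry policy are illustrative assumptions.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// errorIsRetryable classifies a gRPC error using the status package, the
// post-1.6 replacement for the deprecated grpc.Code/grpc.ErrorDesc helpers.
func errorIsRetryable(err error) bool {
	s, ok := status.FromError(err)
	if !ok {
		// Not a gRPC status error (e.g. a plain context or network error).
		return false
	}
	switch s.Code() {
	case codes.Unavailable, codes.DeadlineExceeded:
		return true
	default:
		return false
	}
}

func main() {
	err := status.Error(codes.Unavailable, "etcd member unavailable")
	fmt.Println(errorIsRetryable(err)) // true
}
```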
Transitive dependencies on grpc:
- docker depends on grpc, but according to the package dependency graph (`go list -f '{{ .Deps }}'`) there are no dependency paths from kubernetes to grpc via docker packages (see the sketch after this list).
- containerd v1.0.0 depends on grpc 1.7.2; we should upgrade to containerd v1.0.0 soon, which can be done in a separate PR.
- cAdvisor depends on grpc 1.3.0 on master; it should be upgraded to grpc 1.7.5, which can also be done in a separate PR.
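The docker check above boils down to walking the package dependency graph. A rough, self-contained Go sketch of that check follows; the kubernetes package path used here is only an example, not the exact set of packages inspected for this PR.

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

// dependsOn reports whether target appears in the transitive dependency
// closure of importPath, as printed by `go list -f '{{ .Deps }}'`.
func dependsOn(importPath, target string) (bool, error) {
	out, err := exec.Command("go", "list", "-f", "{{ .Deps }}", importPath).Output()
	if err != nil {
		return false, fmt.Errorf("go list %s: %v", importPath, err)
	}
	// The template prints a Go slice, e.g. "[pkg/a pkg/b ...]".
	for _, dep := range strings.Fields(strings.Trim(strings.TrimSpace(string(out)), "[]")) {
		if strings.HasPrefix(dep, target) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// Example only: substitute whichever kubernetes packages vendor the
	// docker client to repeat the check described above.
	ok, err := dependsOn("k8s.io/kubernetes/pkg/kubelet/dockershim", "google.golang.org/grpc")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("depends on grpc:", ok)
}
```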
**Release note**:
```release-note
Upgrade to etcd client 3.2.11 and grpc 1.7.5 to improve HA etcd cluster stability.
```
Commit a7b404ec7f.

Godeps/Godeps.json (generated), 429 changed lines:
- github.com/boltdb/bolt (v1.3.0, rev 583e8937c61f1af6513608ccc75c97b6abdf4ff9) is dropped; github.com/coreos/bbolt (v1.3.1-coreos.6, rev 48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d) and github.com/cockroachdb/cmux (rev 112f0506e7743d64a6eb8fedbcff13d9979bbf92) are added.
- Every vendored github.com/coreos/etcd/... package moves from v3.1.10 (rev 0520cb9304cb2385f7e72b8bc02d6e4d3257158a) to v3.2.11 (rev 1e1dbb23924672c6cd72c62ee0db2b45f778da71).
- New github.com/coreos/etcd packages vendored at v3.2.11: clientv3/concurrency, clientv3/namespace, clientv3/naming, embed, etcdserver/api/etcdhttp, etcdserver/api/v3client, etcdserver/api/v3election (with v3electionpb and v3electionpb/gw), etcdserver/api/v3lock (with v3lockpb and v3lockpb/gw), etcdserver/etcdserverpb/gw, pkg/cors, pkg/debugutil, pkg/srv, and proxy/grpcproxy/adapter.
- github.com/golang/protobuf/protoc-gen-go/descriptor is added at rev 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9.
- github.com/grpc-ecosystem/grpc-gateway (runtime, runtime/internal, utilities) moves from v1.1.0-25-g84398b9 (rev 84398b94e188ee336f307779b57b3aa91af7063c) to v1.3.0 (rev 8cc3a55af3bcf171a1c23a90c4df9cf591706104).
- github.com/karlseguin/ccache (v2.0.2-5-g3ba9789) is dropped.
- google.golang.org/genproto googleapis/api/annotations and googleapis/rpc/status are pinned at rev 09f6ed296fc66555a25fe4ce95173148778dfa85.
- Every vendored google.golang.org/grpc/... package moves from v1.3.0 (rev d2e1b51f33ff8c5e4a15560ff049d200e83726c5) to v1.7.5 (rev 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e); new packages vendored at v1.7.5: balancer, connectivity, grpclb/grpc_lb_v1/messages, health/grpc_health_v1, and resolver.
Godeps/LICENSES (generated), 8326 changed lines: file diff suppressed because it is too large.
A further generated Godeps/Godeps.json (file path not preserved above) changes along the same lines:

- The github.com/coreos/etcd/... client packages move from rev 0520cb9304cb2385f7e72b8bc02d6e4d3257158a to rev 1e1dbb23924672c6cd72c62ee0db2b45f778da71, with pkg/srv and version newly listed at the new rev.
- The github.com/golang/protobuf entries stay at rev 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9, with protoc-gen-go/descriptor now listed while jsonpb and ptypes/struct are no longer listed.
- github.com/grpc-ecosystem/go-grpc-prometheus and the github.com/grpc-ecosystem/grpc-gateway packages (runtime, runtime/internal, utilities) are dropped.
- The google.golang.org/grpc/... packages move from rev d2e1b51f33ff8c5e4a15560ff049d200e83726c5 to rev 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e; balancer, connectivity, health/grpc_health_v1, and resolver are newly listed, and grpclb/grpc_lb_v1 is replaced by grpclb/grpc_lb_v1/messages. google.golang.org/genproto googleapis/api/annotations and googleapis/rpc/status are pinned at rev 09f6ed296fc66555a25fe4ce95173148778dfa85.
staging/src/k8s.io/apiserver/Godeps/Godeps.json (generated), 260 changed lines:
- github.com/boltdb/bolt (rev 583e8937c61f1af6513608ccc75c97b6abdf4ff9) is dropped; github.com/cockroachdb/cmux (rev 112f0506e7743d64a6eb8fedbcff13d9979bbf92) and github.com/coreos/bbolt (rev 48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d) are added.
- The github.com/coreos/etcd/... packages move from rev 0520cb9304cb2385f7e72b8bc02d6e4d3257158a to rev 1e1dbb23924672c6cd72c62ee0db2b45f778da71, and the same new etcd packages as in the root Godeps.json (clientv3/concurrency, clientv3/namespace, clientv3/naming, embed, etcdserver/api/etcdhttp, etcdserver/api/v3client, the v3election and v3lock API packages, etcdserver/etcdserverpb/gw, pkg/cors, pkg/debugutil, pkg/srv, proxy/grpcproxy/adapter) are newly listed.
- github.com/dgrijalva/jwt-go (rev 01aeca54ebda6e0fbfafd0a524d234159c05ec20), github.com/golang/groupcache/lru (rev 02826c3e79038b59d737d3b1c0a1d937f71a4433), and github.com/golang/protobuf/protoc-gen-go/descriptor (rev 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9) are newly listed; github.com/karlseguin/ccache is dropped.
- github.com/grpc-ecosystem/grpc-gateway (runtime, runtime/internal, utilities) moves from rev 84398b94e188ee336f307779b57b3aa91af7063c to rev 8cc3a55af3bcf171a1c23a90c4df9cf591706104.
- The google.golang.org/grpc/... packages move from rev d2e1b51f33ff8c5e4a15560ff049d200e83726c5 to rev 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e; balancer, connectivity, health/grpc_health_v1, and resolver are newly listed, grpclb/grpc_lb_v1 is replaced by grpclb/grpc_lb_v1/messages, and google.golang.org/genproto googleapis/api/annotations and googleapis/rpc/status are pinned at rev 09f6ed296fc66555a25fe4ce95173148778dfa85.
@ -13,6 +13,7 @@ go_library(
|
||||
"//vendor/github.com/coreos/etcd/client:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/integration:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library",
|
||||
|
@ -35,6 +35,7 @@ import (
|
||||
etcd "github.com/coreos/etcd/client"
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
|
||||
"github.com/coreos/etcd/etcdserver/api/v2http"
|
||||
"github.com/coreos/etcd/integration"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
@ -154,6 +155,7 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
m.AuthToken = "simple"
|
||||
} else {
|
||||
cln := newLocalListener(t)
|
||||
m.ClientListeners = []net.Listener{cln}
|
||||
@ -189,9 +191,9 @@ func (m *EtcdTestServer) launch(t *testing.T) error {
|
||||
if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
|
||||
return fmt.Errorf("failed to initialize the etcd server: %v", err)
|
||||
}
|
||||
m.s.SyncTicker = time.Tick(500 * time.Millisecond)
|
||||
m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
|
||||
m.s.Start()
|
||||
m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)}
|
||||
m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)}
|
||||
for _, ln := range m.PeerListeners {
|
||||
hs := &httptest.Server{
|
||||
Listener: ln,
|
||||
|
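The hunk above applies two etcd API changes that this version bump requires: `EtcdServer.SyncTicker` is now a `*time.Ticker` rather than the `<-chan time.Time` returned by `time.Tick`, and the peer handler constructor moved from `etcdserver/api/v2http` to `etcdserver/api/etcdhttp`. A minimal sketch of the same call-site changes (the wrapper function is hypothetical, for illustration only):

```go
package etcdtestutil

import (
	"time"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
)

// applyEtcd32CallSites mirrors the two call sites updated in the hunk above.
func applyEtcd32CallSites(s *etcdserver.EtcdServer) {
	// Was: s.SyncTicker = time.Tick(500 * time.Millisecond)
	s.SyncTicker = time.NewTicker(500 * time.Millisecond)

	// Was: v2http.NewPeerHandler(s); the constructor now lives in api/etcdhttp.
	_ = etcdhttp.NewPeerHandler(s)
}
```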
118
staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json
generated
@ -28,60 +28,60 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/auth/authpb",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/client",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/clientv3",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/mvcc/mvccpb",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/fileutil",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/srv",
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/transport",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/types",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/version",
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-semver/semver",
|
||||
"Rev": "568e959cd89871e61434c1143528d9162da89ef2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/daemon",
|
||||
"Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/journal",
|
||||
"Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/capnslog",
|
||||
"Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8"
|
||||
@ -147,11 +147,11 @@
|
||||
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/jsonpb",
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
},
|
||||
{
|
||||
@ -166,10 +166,6 @@
|
||||
"ImportPath": "github.com/golang/protobuf/ptypes/duration",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/ptypes/struct",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
@ -202,22 +198,6 @@
|
||||
"ImportPath": "github.com/gregjones/httpcache/diskcache",
|
||||
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
|
||||
"Rev": "2500245aa6110c562d17020fb31a2c133d737799"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime",
|
||||
"Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal",
|
||||
"Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities",
|
||||
"Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru",
|
||||
"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
|
||||
@ -418,65 +398,85 @@
|
||||
"ImportPath": "golang.org/x/text/width",
|
||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/genproto/googleapis/api/annotations",
|
||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
|
||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/balancer",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/codes",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/connectivity",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/credentials",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/health/grpc_health_v1",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/internal",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/keepalive",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/metadata",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/naming",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/peer",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/resolver",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/stats",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/status",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/tap",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/transport",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/inf.v0",
|
||||
|
118
staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json
generated
@ -28,60 +28,60 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/auth/authpb",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/client",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/clientv3",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/mvcc/mvccpb",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/fileutil",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/srv",
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/transport",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/types",
|
||||
"Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a"
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/version",
|
||||
"Rev": "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-semver/semver",
|
||||
"Rev": "568e959cd89871e61434c1143528d9162da89ef2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/daemon",
|
||||
"Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/go-systemd/journal",
|
||||
"Rev": "48702e0da86bd25e76cfef347e2adeb434a0d0a6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/pkg/capnslog",
|
||||
"Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8"
|
||||
@ -139,11 +139,11 @@
|
||||
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/jsonpb",
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
},
|
||||
{
|
||||
@ -158,10 +158,6 @@
|
||||
"ImportPath": "github.com/golang/protobuf/ptypes/duration",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/ptypes/struct",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
|
||||
"Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
|
||||
@ -194,22 +190,6 @@
|
||||
"ImportPath": "github.com/gregjones/httpcache/diskcache",
|
||||
"Rev": "787624de3eb7bd915c329cba748687a3b22666a6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus",
|
||||
"Rev": "2500245aa6110c562d17020fb31a2c133d737799"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime",
|
||||
"Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal",
|
||||
"Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities",
|
||||
"Rev": "84398b94e188ee336f307779b57b3aa91af7063c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru",
|
||||
"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
|
||||
@ -402,65 +382,85 @@
|
||||
"ImportPath": "golang.org/x/text/width",
|
||||
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/genproto/googleapis/api/annotations",
|
||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/genproto/googleapis/rpc/status",
|
||||
"Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/balancer",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/codes",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/connectivity",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/credentials",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/health/grpc_health_v1",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/internal",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/keepalive",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/metadata",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/naming",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/peer",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/resolver",
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/stats",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/status",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/tap",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/transport",
|
||||
"Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
|
||||
"Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/inf.v0",
|
||||
|
@ -77,6 +77,7 @@ func NewEtcd(dataDir string) *EtcdServer {
|
||||
MaxWALFiles: maxWALFiles,
|
||||
TickMs: tickMs,
|
||||
ElectionTicks: electionTicks,
|
||||
AuthToken: "simple",
|
||||
}
|
||||
|
||||
return &EtcdServer{
|
||||
|
@ -14,6 +14,7 @@ go_test(
|
||||
deps = [
|
||||
"//cmd/kube-apiserver/app/testing:go_default_library",
|
||||
"//test/integration/framework:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library",
|
||||
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
_ "github.com/coreos/etcd/etcdserver/api/v3rpc" // Force package logger init.
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
|
||||
appsv1beta2 "k8s.io/api/apps/v1beta2"
|
||||
|
10
vendor/BUILD
vendored
@ -45,7 +45,6 @@ filegroup(
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/sts:all-srcs",
|
||||
"//vendor/github.com/beorn7/perks/quantile:all-srcs",
|
||||
"//vendor/github.com/blang/semver:all-srcs",
|
||||
"//vendor/github.com/boltdb/bolt:all-srcs",
|
||||
"//vendor/github.com/chai2010/gettext-go/gettext:all-srcs",
|
||||
"//vendor/github.com/cloudflare/cfssl/auth:all-srcs",
|
||||
"//vendor/github.com/cloudflare/cfssl/certdb:all-srcs",
|
||||
@ -59,6 +58,7 @@ filegroup(
|
||||
"//vendor/github.com/cloudflare/cfssl/ocsp/config:all-srcs",
|
||||
"//vendor/github.com/cloudflare/cfssl/signer:all-srcs",
|
||||
"//vendor/github.com/clusterhq/flocker-go:all-srcs",
|
||||
"//vendor/github.com/cockroachdb/cmux:all-srcs",
|
||||
"//vendor/github.com/codedellemc/goscaleio:all-srcs",
|
||||
"//vendor/github.com/codegangsta/negroni:all-srcs",
|
||||
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:all-srcs",
|
||||
@ -74,12 +74,14 @@ filegroup(
|
||||
"//vendor/github.com/containernetworking/cni/pkg/invoke:all-srcs",
|
||||
"//vendor/github.com/containernetworking/cni/pkg/types:all-srcs",
|
||||
"//vendor/github.com/containernetworking/cni/pkg/version:all-srcs",
|
||||
"//vendor/github.com/coreos/bbolt:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/alarm:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/auth:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/client:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/clientv3:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/compactor:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/discovery:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/embed:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/error:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/integration:all-srcs",
|
||||
@ -87,8 +89,10 @@ filegroup(
|
||||
"//vendor/github.com/coreos/etcd/mvcc:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/adt:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/contention:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/cors:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/cpuutil:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/crc:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/debugutil:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/fileutil:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/httputil:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/idutil:all-srcs",
|
||||
@ -100,6 +104,7 @@ filegroup(
|
||||
"//vendor/github.com/coreos/etcd/pkg/pbutil:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/runtime:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/schedule:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/srv:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/testutil:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/tlsutil:all-srcs",
|
||||
"//vendor/github.com/coreos/etcd/pkg/transport:all-srcs",
|
||||
@ -211,6 +216,7 @@ filegroup(
|
||||
"//vendor/github.com/golang/mock/gomock:all-srcs",
|
||||
"//vendor/github.com/golang/protobuf/jsonpb:all-srcs",
|
||||
"//vendor/github.com/golang/protobuf/proto:all-srcs",
|
||||
"//vendor/github.com/golang/protobuf/protoc-gen-go/descriptor:all-srcs",
|
||||
"//vendor/github.com/golang/protobuf/ptypes:all-srcs",
|
||||
"//vendor/github.com/google/btree:all-srcs",
|
||||
"//vendor/github.com/google/cadvisor/accelerators:all-srcs",
|
||||
@ -267,7 +273,6 @@ filegroup(
|
||||
"//vendor/github.com/jteeuwen/go-bindata:all-srcs",
|
||||
"//vendor/github.com/juju/ratelimit:all-srcs",
|
||||
"//vendor/github.com/kardianos/osext:all-srcs",
|
||||
"//vendor/github.com/karlseguin/ccache:all-srcs",
|
||||
"//vendor/github.com/kr/fs:all-srcs",
|
||||
"//vendor/github.com/kr/pretty:all-srcs",
|
||||
"//vendor/github.com/kr/pty:all-srcs",
|
||||
@ -391,6 +396,7 @@ filegroup(
|
||||
"//vendor/google.golang.org/api/logging/v2beta1:all-srcs",
|
||||
"//vendor/google.golang.org/api/monitoring/v3:all-srcs",
|
||||
"//vendor/google.golang.org/api/pubsub/v1:all-srcs",
|
||||
"//vendor/google.golang.org/genproto/googleapis/api/annotations:all-srcs",
|
||||
"//vendor/google.golang.org/genproto/googleapis/rpc/status:all-srcs",
|
||||
"//vendor/google.golang.org/grpc:all-srcs",
|
||||
"//vendor/gopkg.in/gcfg.v1:all-srcs",
|
||||
|
18
vendor/github.com/boltdb/bolt/Makefile
generated
vendored
@ -1,18 +0,0 @@
|
||||
BRANCH=`git rev-parse --abbrev-ref HEAD`
|
||||
COMMIT=`git rev-parse --short HEAD`
|
||||
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
|
||||
|
||||
default: build
|
||||
|
||||
race:
|
||||
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
||||
|
||||
# go get github.com/kisielk/errcheck
|
||||
errcheck:
|
||||
@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
|
||||
|
||||
test:
|
||||
@go test -v -cover .
|
||||
@go test -v ./cmd/bolt
|
||||
|
||||
.PHONY: fmt test
|
24
vendor/github.com/cockroachdb/cmux/.gitignore
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
22
vendor/github.com/cockroachdb/cmux/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
|
||||
gobuild_args: -race
|
||||
|
||||
before_install:
|
||||
- go get -u github.com/golang/lint/golint
|
||||
- if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then go get -u github.com/kisielk/errcheck; fi
|
||||
- go get -u golang.org/x/tools/cmd/vet
|
||||
|
||||
before_script:
|
||||
- '! gofmt -s -l . | read'
|
||||
- golint ./...
|
||||
- echo $TRAVIS_GO_VERSION
|
||||
- if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then errcheck ./...; fi
|
||||
- go vet .
|
||||
- go tool vet --shadow .
|
31
vendor/github.com/cockroachdb/cmux/BUILD
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"buffer.go",
|
||||
"cmux.go",
|
||||
"matchers.go",
|
||||
"patricia.go",
|
||||
],
|
||||
importpath = "github.com/cockroachdb/cmux",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/golang.org/x/net/http2:go_default_library",
|
||||
"//vendor/golang.org/x/net/http2/hpack:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
202
vendor/github.com/cockroachdb/cmux/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
65
vendor/github.com/cockroachdb/cmux/README.md
generated
vendored
Normal file
@ -0,0 +1,65 @@
# cmux: Connection Mux [](https://travis-ci.org/cockroachdb/cmux) [](https://godoc.org/github.com/cockroachdb/cmux)

cmux is a generic Go library to multiplex connections based on their payload.
Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, Go RPC, and pretty much any
other protocol on the same TCP listener.

## How-To
Simply create your main listener, create a cmux for that listener,
and then match connections:
```go
// Create the main listener.
l, err := net.Listen("tcp", ":23456")
if err != nil {
	log.Fatal(err)
}

// Create a cmux.
m := cmux.New(l)

// Match connections in order:
// First grpc, then HTTP, and otherwise Go RPC/TCP.
grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
httpL := m.Match(cmux.HTTP1Fast())
trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched.

// Create your protocol servers.
grpcS := grpc.NewServer()
grpchello.RegisterGreeterServer(grpcS, &server{})

httpS := &http.Server{
	Handler: &helloHTTP1Handler{},
}

trpcS := rpc.NewServer()
trpcS.Register(&ExampleRPCRcvr{})

// Use the muxed listeners for your servers.
go grpcS.Serve(grpcL)
go httpS.Serve(httpL)
go trpcS.Accept(trpcL)

// Start serving!
m.Serve()
```

There are [more examples on GoDoc](https://godoc.org/github.com/cockroachdb/cmux#pkg-examples).

## Performance
Since we are only matching the very first bytes of a connection, the
performance overhead on long-lived connections (i.e., RPCs and pipelined HTTP
streams) is negligible.

## Limitations
* *TLS*: `net/http` uses a [type assertion](https://github.com/golang/go/issues/14221)
to identify TLS connections; since cmux's lookahead-implementing connection
wraps the underlying TLS connection, this type assertion fails. This means you
can serve HTTPS using cmux but `http.Request.TLS` will not be set in your
handlers. If you are able to wrap TLS around cmux, you can work around this
limitation (a sketch of that approach follows this README). See
https://github.com/cockroachdb/cockroach/commit/83caba2 for an example of this approach.

* *Different Protocols on The Same Connection*: `cmux` matches the connection
when it's accepted. For example, one connection can be either gRPC or REST, but
not both. That is, we assume that a client connection is either used for gRPC
or REST.
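As a rough illustration of the TLS workaround noted in the Limitations section above (wrap TLS around the listener returned by cmux, so the connection `net/http` receives is a `*tls.Conn` and `http.Request.TLS` gets populated), a minimal, self-contained sketch might look like this; the address and certificate paths are placeholders:

```go
package main

import (
	"crypto/tls"
	"log"
	"net"
	"net/http"

	"github.com/cockroachdb/cmux"
)

func main() {
	// Accept raw TCP and mux it first.
	l, err := net.Listen("tcp", ":8443")
	if err != nil {
		log.Fatal(err)
	}
	m := cmux.New(l)
	anyL := m.Match(cmux.Any()) // everything not matched elsewhere

	// Then wrap TLS around the matched listener: the outermost connection
	// handed to net/http is a *tls.Conn, so its type assertion succeeds.
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key") // placeholder paths
	if err != nil {
		log.Fatal(err)
	}
	tlsL := tls.NewListener(anyL, &tls.Config{Certificates: []tls.Certificate{cert}})

	go func() { log.Fatal(http.Serve(tlsL, nil)) }()
	log.Fatal(m.Serve())
}
```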
35
vendor/github.com/cockroachdb/cmux/buffer.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// bufferedReader is an optimized implementation of io.Reader that behaves like
|
||||
// ```
|
||||
// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer))
|
||||
// ```
|
||||
// without allocating.
|
||||
type bufferedReader struct {
|
||||
source io.Reader
|
||||
buffer *bytes.Buffer
|
||||
bufferRead int
|
||||
bufferSize int
|
||||
}
|
||||
|
||||
func (s *bufferedReader) Read(p []byte) (int, error) {
|
||||
// Functionality of bytes.Reader.
|
||||
bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize])
|
||||
s.bufferRead += bn
|
||||
|
||||
p = p[bn:]
|
||||
|
||||
// Functionality of io.TeeReader.
|
||||
sn, sErr := s.source.Read(p)
|
||||
if sn > 0 {
|
||||
if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil {
|
||||
return bn + wn, wErr
|
||||
}
|
||||
}
|
||||
return bn + sn, sErr
|
||||
}
|
210
vendor/github.com/cockroachdb/cmux/cmux.go
generated
vendored
Normal file
@ -0,0 +1,210 @@
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Matcher matches a connection based on its content.
|
||||
type Matcher func(io.Reader) bool
|
||||
|
||||
// ErrorHandler handles an error and returns whether
|
||||
// the mux should continue serving the listener.
|
||||
type ErrorHandler func(error) bool
|
||||
|
||||
var _ net.Error = ErrNotMatched{}
|
||||
|
||||
// ErrNotMatched is returned whenever a connection is not matched by any of
|
||||
// the matchers registered in the multiplexer.
|
||||
type ErrNotMatched struct {
|
||||
c net.Conn
|
||||
}
|
||||
|
||||
func (e ErrNotMatched) Error() string {
|
||||
return fmt.Sprintf("mux: connection %v not matched by an matcher",
|
||||
e.c.RemoteAddr())
|
||||
}
|
||||
|
||||
// Temporary implements the net.Error interface.
|
||||
func (e ErrNotMatched) Temporary() bool { return true }
|
||||
|
||||
// Timeout implements the net.Error interface.
|
||||
func (e ErrNotMatched) Timeout() bool { return false }
|
||||
|
||||
type errListenerClosed string
|
||||
|
||||
func (e errListenerClosed) Error() string { return string(e) }
|
||||
func (e errListenerClosed) Temporary() bool { return false }
|
||||
func (e errListenerClosed) Timeout() bool { return false }
|
||||
|
||||
// ErrListenerClosed is returned from muxListener.Accept when the underlying
|
||||
// listener is closed.
|
||||
var ErrListenerClosed = errListenerClosed("mux: listener closed")
|
||||
|
||||
// New instantiates a new connection multiplexer.
|
||||
func New(l net.Listener) CMux {
|
||||
return &cMux{
|
||||
root: l,
|
||||
bufLen: 1024,
|
||||
errh: func(_ error) bool { return true },
|
||||
donec: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// CMux is a multiplexer for network connections.
|
||||
type CMux interface {
|
||||
// Match returns a net.Listener that sees (i.e., accepts) only
|
||||
// the connections matched by at least one of the matchers.
|
||||
//
|
||||
// The order used to call Match determines the priority of matchers.
|
||||
Match(...Matcher) net.Listener
|
||||
// Serve starts multiplexing the listener. Serve blocks and perhaps
|
||||
// should be invoked concurrently within a goroutine.
|
||||
Serve() error
|
||||
// HandleError registers an error handler that handles listener errors.
|
||||
HandleError(ErrorHandler)
|
||||
}
|
||||
|
||||
type matchersListener struct {
|
||||
ss []Matcher
|
||||
l muxListener
|
||||
}
|
||||
|
||||
type cMux struct {
|
||||
root net.Listener
|
||||
bufLen int
|
||||
errh ErrorHandler
|
||||
donec chan struct{}
|
||||
sls []matchersListener
|
||||
}
|
||||
|
||||
func (m *cMux) Match(matchers ...Matcher) net.Listener {
|
||||
ml := muxListener{
|
||||
Listener: m.root,
|
||||
connc: make(chan net.Conn, m.bufLen),
|
||||
}
|
||||
m.sls = append(m.sls, matchersListener{ss: matchers, l: ml})
|
||||
return ml
|
||||
}
|
||||
|
||||
func (m *cMux) Serve() error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
defer func() {
|
||||
close(m.donec)
|
||||
wg.Wait()
|
||||
|
||||
for _, sl := range m.sls {
|
||||
close(sl.l.connc)
|
||||
// Drain the connections enqueued for the listener.
|
||||
for c := range sl.l.connc {
|
||||
_ = c.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
c, err := m.root.Accept()
|
||||
if err != nil {
|
||||
if !m.handleErr(err) {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go m.serve(c, m.donec, &wg)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
|
||||
muc := newMuxConn(c)
|
||||
for _, sl := range m.sls {
|
||||
for _, s := range sl.ss {
|
||||
matched := s(muc.getSniffer())
|
||||
if matched {
|
||||
select {
|
||||
case sl.l.connc <- muc:
|
||||
case <-donec:
|
||||
_ = c.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_ = c.Close()
|
||||
err := ErrNotMatched{c: c}
|
||||
if !m.handleErr(err) {
|
||||
_ = m.root.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *cMux) HandleError(h ErrorHandler) {
|
||||
m.errh = h
|
||||
}
|
||||
|
||||
func (m *cMux) handleErr(err error) bool {
|
||||
if !m.errh(err) {
|
||||
return false
|
||||
}
|
||||
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
return ne.Temporary()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type muxListener struct {
|
||||
net.Listener
|
||||
connc chan net.Conn
|
||||
}
|
||||
|
||||
func (l muxListener) Accept() (net.Conn, error) {
|
||||
c, ok := <-l.connc
|
||||
if !ok {
|
||||
return nil, ErrListenerClosed
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// MuxConn wraps a net.Conn and provides transparent sniffing of connection data.
|
||||
type MuxConn struct {
|
||||
net.Conn
|
||||
buf bytes.Buffer
|
||||
sniffer bufferedReader
|
||||
}
|
||||
|
||||
func newMuxConn(c net.Conn) *MuxConn {
|
||||
return &MuxConn{
|
||||
Conn: c,
|
||||
}
|
||||
}
|
||||
|
||||
// From the io.Reader documentation:
|
||||
//
|
||||
// When Read encounters an error or end-of-file condition after
|
||||
// successfully reading n > 0 bytes, it returns the number of
|
||||
// bytes read. It may return the (non-nil) error from the same call
|
||||
// or return the error (and n == 0) from a subsequent call.
|
||||
// An instance of this general case is that a Reader returning
|
||||
// a non-zero number of bytes at the end of the input stream may
|
||||
// return either err == EOF or err == nil. The next Read should
|
||||
// return 0, EOF.
|
||||
func (m *MuxConn) Read(p []byte) (int, error) {
|
||||
if n, err := m.buf.Read(p); err != io.EOF {
|
||||
return n, err
|
||||
}
|
||||
return m.Conn.Read(p)
|
||||
}
|
||||
|
||||
func (m *MuxConn) getSniffer() io.Reader {
|
||||
m.sniffer = bufferedReader{source: m.Conn, buffer: &m.buf, bufferSize: m.buf.Len()}
|
||||
return &m.sniffer
|
||||
}
|
150
vendor/github.com/cockroachdb/cmux/matchers.go
generated
vendored
Normal file
@ -0,0 +1,150 @@
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
// Any is a Matcher that matches any connection.
|
||||
func Any() Matcher {
|
||||
return func(r io.Reader) bool { return true }
|
||||
}
|
||||
|
||||
// PrefixMatcher returns a matcher that matches a connection if it
|
||||
// starts with any of the strings in strs.
|
||||
func PrefixMatcher(strs ...string) Matcher {
|
||||
pt := newPatriciaTreeString(strs...)
|
||||
return pt.matchPrefix
|
||||
}
|
||||
|
||||
var defaultHTTPMethods = []string{
|
||||
"OPTIONS",
|
||||
"GET",
|
||||
"HEAD",
|
||||
"POST",
|
||||
"PUT",
|
||||
"DELETE",
|
||||
"TRACE",
|
||||
"CONNECT",
|
||||
}
|
||||
|
||||
// HTTP1Fast only matches the methods in the HTTP request.
|
||||
//
|
||||
// This matcher is very optimistic: if it returns true, it does not mean that
|
||||
// the request is a valid HTTP request. If you want a correct but slower HTTP1
|
||||
// matcher, use HTTP1 instead.
|
||||
func HTTP1Fast(extMethods ...string) Matcher {
|
||||
return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
|
||||
}
|
||||
|
||||
const maxHTTPRead = 4096
|
||||
|
||||
// HTTP1 parses the first line or up to 4096 bytes of the request to see if
|
||||
// the connection contains an HTTP request.
|
||||
func HTTP1() Matcher {
|
||||
return func(r io.Reader) bool {
|
||||
br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead})
|
||||
l, part, err := br.ReadLine()
|
||||
if err != nil || part {
|
||||
return false
|
||||
}
|
||||
|
||||
_, _, proto, ok := parseRequestLine(string(l))
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
v, _, ok := http.ParseHTTPVersion(proto)
|
||||
return ok && v == 1
|
||||
}
|
||||
}
|
||||
|
||||
// grabbed from net/http.
|
||||
func parseRequestLine(line string) (method, uri, proto string, ok bool) {
|
||||
s1 := strings.Index(line, " ")
|
||||
s2 := strings.Index(line[s1+1:], " ")
|
||||
if s1 < 0 || s2 < 0 {
|
||||
return
|
||||
}
|
||||
s2 += s1 + 1
|
||||
return line[:s1], line[s1+1 : s2], line[s2+1:], true
|
||||
}
|
||||
|
||||
// HTTP2 parses the frame header of the first frame to detect whether the
|
||||
// connection is an HTTP2 connection.
|
||||
func HTTP2() Matcher {
|
||||
return hasHTTP2Preface
|
||||
}
|
||||
|
||||
// HTTP1HeaderField returns a matcher matching the header fields of the first
|
||||
// request of an HTTP 1 connection.
|
||||
func HTTP1HeaderField(name, value string) Matcher {
|
||||
return func(r io.Reader) bool {
|
||||
return matchHTTP1Field(r, name, value)
|
||||
}
|
||||
}
|
||||
|
||||
// HTTP2HeaderField returns a matcher matching the header fields of the first
|
||||
// headers frame.
|
||||
func HTTP2HeaderField(name, value string) Matcher {
|
||||
return func(r io.Reader) bool {
|
||||
return matchHTTP2Field(r, name, value)
|
||||
}
|
||||
}
|
||||
|
||||
func hasHTTP2Preface(r io.Reader) bool {
|
||||
var b [len(http2.ClientPreface)]byte
|
||||
if _, err := io.ReadFull(r, b[:]); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return string(b[:]) == http2.ClientPreface
|
||||
}
|
||||
|
||||
func matchHTTP1Field(r io.Reader, name, value string) (matched bool) {
|
||||
req, err := http.ReadRequest(bufio.NewReader(r))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return req.Header.Get(name) == value
|
||||
}
|
||||
|
||||
func matchHTTP2Field(r io.Reader, name, value string) (matched bool) {
|
||||
if !hasHTTP2Preface(r) {
|
||||
return false
|
||||
}
|
||||
|
||||
framer := http2.NewFramer(ioutil.Discard, r)
|
||||
hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) {
|
||||
if hf.Name == name && hf.Value == value {
|
||||
matched = true
|
||||
}
|
||||
})
|
||||
for {
|
||||
f, err := framer.ReadFrame()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
switch f := f.(type) {
|
||||
case *http2.HeadersFrame:
|
||||
if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
|
||||
return false
|
||||
}
|
||||
if matched {
|
||||
return true
|
||||
}
|
||||
|
||||
if f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
173
vendor/github.com/cockroachdb/cmux/patricia.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// patriciaTree is a simple patricia tree that handles []byte instead of string
|
||||
// and cannot be changed after instantiation.
|
||||
type patriciaTree struct {
|
||||
root *ptNode
|
||||
}
|
||||
|
||||
func newPatriciaTree(b ...[]byte) *patriciaTree {
|
||||
return &patriciaTree{
|
||||
root: newNode(b),
|
||||
}
|
||||
}
|
||||
|
||||
func newPatriciaTreeString(strs ...string) *patriciaTree {
|
||||
b := make([][]byte, len(strs))
|
||||
for i, s := range strs {
|
||||
b[i] = []byte(s)
|
||||
}
|
||||
return &patriciaTree{
|
||||
root: newNode(b),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *patriciaTree) matchPrefix(r io.Reader) bool {
|
||||
return t.root.match(r, true)
|
||||
}
|
||||
|
||||
func (t *patriciaTree) match(r io.Reader) bool {
|
||||
return t.root.match(r, false)
|
||||
}
|
||||
|
||||
type ptNode struct {
|
||||
prefix []byte
|
||||
next map[byte]*ptNode
|
||||
terminal bool
|
||||
}
|
||||
|
||||
func newNode(strs [][]byte) *ptNode {
|
||||
if len(strs) == 0 {
|
||||
return &ptNode{
|
||||
prefix: []byte{},
|
||||
terminal: true,
|
||||
}
|
||||
}
|
||||
|
||||
if len(strs) == 1 {
|
||||
return &ptNode{
|
||||
prefix: strs[0],
|
||||
terminal: true,
|
||||
}
|
||||
}
|
||||
|
||||
p, strs := splitPrefix(strs)
|
||||
n := &ptNode{
|
||||
prefix: p,
|
||||
}
|
||||
|
||||
nexts := make(map[byte][][]byte)
|
||||
for _, s := range strs {
|
||||
if len(s) == 0 {
|
||||
n.terminal = true
|
||||
continue
|
||||
}
|
||||
nexts[s[0]] = append(nexts[s[0]], s[1:])
|
||||
}
|
||||
|
||||
n.next = make(map[byte]*ptNode)
|
||||
for first, rests := range nexts {
|
||||
n.next[first] = newNode(rests)
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) {
|
||||
if len(bss) == 0 || len(bss[0]) == 0 {
|
||||
return prefix, bss
|
||||
}
|
||||
|
||||
if len(bss) == 1 {
|
||||
return bss[0], [][]byte{{}}
|
||||
}
|
||||
|
||||
for i := 0; ; i++ {
|
||||
var cur byte
|
||||
eq := true
|
||||
for j, b := range bss {
|
||||
if len(b) <= i {
|
||||
eq = false
|
||||
break
|
||||
}
|
||||
|
||||
if j == 0 {
|
||||
cur = b[i]
|
||||
continue
|
||||
}
|
||||
|
||||
if cur != b[i] {
|
||||
eq = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !eq {
|
||||
break
|
||||
}
|
||||
|
||||
prefix = append(prefix, cur)
|
||||
}
|
||||
|
||||
rest = make([][]byte, 0, len(bss))
|
||||
for _, b := range bss {
|
||||
rest = append(rest, b[len(prefix):])
|
||||
}
|
||||
|
||||
return prefix, rest
|
||||
}
|
||||
|
||||
func readBytes(r io.Reader, n int) (b []byte, err error) {
|
||||
b = make([]byte, n)
|
||||
o := 0
|
||||
for o < n {
|
||||
nr, err := r.Read(b[o:])
|
||||
if err != nil && err != io.EOF {
|
||||
return b, err
|
||||
}
|
||||
|
||||
o += nr
|
||||
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
return b[:o], nil
|
||||
}
|
||||
|
||||
func (n *ptNode) match(r io.Reader, prefix bool) bool {
|
||||
if l := len(n.prefix); l > 0 {
|
||||
b, err := readBytes(r, l)
|
||||
if err != nil || len(b) != l || !bytes.Equal(b, n.prefix) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if prefix && n.terminal {
|
||||
return true
|
||||
}
|
||||
|
||||
b := make([]byte, 1)
|
||||
for {
|
||||
nr, err := r.Read(b)
|
||||
if nr != 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if err == io.EOF {
|
||||
return n.terminal
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
nextN, ok := n.next[b[0]]
|
||||
return ok && nextN.match(r, prefix)
|
||||
}
|
1
vendor/github.com/boltdb/bolt/.gitignore → vendor/github.com/coreos/bbolt/.gitignore
generated
vendored
@ -2,3 +2,4 @@
|
||||
*.test
|
||||
*.swp
|
||||
/bin/
|
||||
cmd/bolt/bolt
|
2 vendor/github.com/boltdb/bolt/BUILD → vendor/github.com/coreos/bbolt/BUILD generated vendored
@ -28,7 +28,7 @@ go_library(
        ],
        "//conditions:default": [],
    }),
    importpath = "github.com/boltdb/bolt",
    importpath = "github.com/coreos/bbolt",
    visibility = ["//visibility:public"],
)
0 vendor/github.com/boltdb/bolt/LICENSE → vendor/github.com/coreos/bbolt/LICENSE generated vendored
30 vendor/github.com/coreos/bbolt/Makefile generated vendored Normal file
@ -0,0 +1,30 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

default: build

race:
	@go test -v -race -test.run="TestSimulate_(100op|1000op)"

fmt:
	!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')

# go get honnef.co/go/tools/simple
gosimple:
	gosimple ./...

# go get honnef.co/go/tools/unused
unused:
	unused ./...

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/coreos/bbolt

test:
	go test -timeout 20m -v -coverprofile cover.out -covermode atomic
	# Note: gets "program not an importable package" in out of path builds
	go test -v ./cmd/bolt

.PHONY: race fmt errcheck test gosimple unused
100 vendor/github.com/boltdb/bolt/README.md → vendor/github.com/coreos/bbolt/README.md generated vendored
@ -1,6 +1,16 @@
Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt)
bbolt
====

[](https://goreportcard.com/report/github.com/coreos/bbolt)
[](https://codecov.io/gh/coreos/bbolt)
[](https://godoc.org/github.com/coreos/bbolt)

bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
store. The purpose of this fork is to provide the Go community with an active
maintenance and development target for Bolt; the goal is improved reliability
and stability. bbolt includes bug fixes, performance enhancements, and features
not found in Bolt while preserving backwards compatibility with the Bolt API.

Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database
@ -10,16 +20,18 @@ Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.

[gh_ben]: https://github.com/benbjohnson
[bolt]: https://github.com/boltdb/bolt
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/

## Project Status

Bolt is stable and the API is fixed. Full unit test coverage and randomized
black box testing are used to ensure database consistency and thread safety.
Bolt is currently in high-load production environments serving databases as
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
services every day.
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.

## Table of Contents

@ -59,7 +71,7 @@ services every day.
To start using Bolt, install Go and run `go get`:

```sh
$ go get github.com/boltdb/bolt/...
$ go get github.com/coreos/bbolt/...
```

This will retrieve the library and install the `bolt` command line utility into
@ -79,7 +91,7 @@ package main
import (
	"log"

	"github.com/boltdb/bolt"
	bolt "github.com/coreos/bbolt"
)

func main() {
@ -209,7 +221,7 @@ and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.

However, sometimes you may want to manually start and end your transactions.
You can use the `Tx.Begin()` function directly but **please** be sure to close
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.

```go
@ -395,7 +407,7 @@ db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	prefix := []byte("1234")
	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

@ -448,6 +460,10 @@ db.View(func(tx *bolt.Tx) error {
})
```

Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.

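The note added above only holds inside the read transaction; a minimal sketch of copying a value out before the transaction closes (the `db` handle, bucket name, and key are illustrative assumptions):

```go
// Copy a value out of a View transaction so it stays valid afterwards.
var valueCopy []byte
err := db.View(func(tx *bolt.Tx) error {
	return tx.Bucket([]byte("MyBucket")).ForEach(func(k, v []byte) error {
		if string(k) == "key-to-keep" {
			valueCopy = make([]byte, len(v)) // v points into the mmap; copy it
			copy(valueCopy, v)
		}
		return nil
	})
})
if err != nil {
	log.Fatal(err)
}
```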
### Nested buckets

@ -460,6 +476,55 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```

Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings.

```go

// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
	// Start the transaction.
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Retrieve the root bucket for the account.
	// Assume this has already been created when the account was set up.
	root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))

	// Setup the users bucket.
	bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
	if err != nil {
		return err
	}

	// Generate an ID for the new user.
	userID, err := bkt.NextSequence()
	if err != nil {
		return err
	}
	u.ID = userID

	// Marshal and save the encoded user.
	if buf, err := json.Marshal(u); err != nil {
		return err
	} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
		return err
	}

	// Commit the transaction.
	if err := tx.Commit(); err != nil {
		return err
	}

	return nil
}

```


### Database backups

@ -469,7 +534,7 @@ this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.

By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
system's page cache. See the [`Tx`](https://godoc.org/github.com/coreos/bbolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.

One common use case is to backup over HTTP so you can use tools like `cURL` to
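A minimal sketch of the HTTP backup pattern the paragraph above refers to, built on the `Tx.WriteTo` signature shown later in this diff (the handler name, attachment filename, and use of `Tx.Size` for Content-Length are illustrative assumptions):

```go
package main

import (
	"net/http"
	"strconv"

	bolt "github.com/coreos/bbolt"
)

// backupHandleFunc streams a consistent snapshot of db to the client.
func backupHandleFunc(db *bolt.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		err := db.View(func(tx *bolt.Tx) error {
			w.Header().Set("Content-Type", "application/octet-stream")
			w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
			w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
			_, err := tx.WriteTo(w) // hot backup from a read-only transaction
			return err
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}
```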
@ -715,6 +780,9 @@ Here are a few things to note when evaluating and using Bolt:
  can be reused by a new page or can be unmapped from virtual memory and you'll
  see an `unexpected fault address` panic when accessing it.

* Bolt uses an exclusive write lock on the database file so it cannot be
  shared by multiple processes.

* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
  buckets that have random inserts will cause your database to have very poor
  page utilization.
@ -755,7 +823,7 @@ Here are a few things to note when evaluating and using Bolt:

## Reading the Source

Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.

@ -848,5 +916,13 @@ Below is a list of public, open source projects that use Bolt:
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB

If you are using Bolt in a project please send a pull request to add it to the list.
@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
28 vendor/github.com/coreos/bbolt/bolt_arm.go generated vendored Normal file
@ -0,0 +1,28 @@
package bolt

import "unsafe"

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool

func init() {
	// Simple check to see whether this arch handles unaligned load/stores
	// correctly.

	// ARM9 and older devices require load/stores to be from/to aligned
	// addresses. If not, the lower 2 bits are cleared and that address is
	// read in a jumbled up order.

	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html

	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))

	brokenUnaligned = val != 0x11222211
}
@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
12 vendor/github.com/coreos/bbolt/bolt_mips64x.go generated vendored Normal file
@ -0,0 +1,12 @@
// +build mips64 mips64le

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x8000000000 // 512GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@ -1,7 +1,12 @@
// +build mips mipsle

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
const maxMapSize = 0x40000000 // 1GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
33 vendor/github.com/boltdb/bolt/bolt_unix.go → vendor/github.com/coreos/bbolt/bolt_unix.go generated vendored
@ -13,29 +13,32 @@ import (
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	fd := db.file.Fd()
	flag := syscall.LOCK_NB
	if exclusive {
		flag |= syscall.LOCK_EX
	} else {
		flag |= syscall.LOCK_SH
	}
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		flag := syscall.LOCK_SH
		if exclusive {
			flag = syscall.LOCK_EX
		}

		// Otherwise attempt to obtain an exclusive lock.
		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
		// Attempt to obtain an exclusive lock.
		err := syscall.Flock(int(fd), flag)
		if err == nil {
			return nil
		} else if err != syscall.EWOULDBLOCK {
			return err
		}

		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
		time.Sleep(flockRetryTimeout)
	}
}
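The retry loop above polls every flockRetryTimeout until Options.Timeout elapses; a minimal sketch of how a caller opts into that behavior when opening a database (the path and timeout value are illustrative):

```go
package main

import (
	"log"
	"time"

	bolt "github.com/coreos/bbolt"
)

func main() {
	// Options.Timeout bounds how long Open keeps retrying the file lock
	// before failing with ErrTimeout.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```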
@ -13,34 +13,33 @@ import (
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	fd := db.file.Fd()
	var lockType int16
	if exclusive {
		lockType = syscall.F_WRLCK
	} else {
		lockType = syscall.F_RDLCK
	}
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		var lock syscall.Flock_t
		lock.Start = 0
		lock.Len = 0
		lock.Pid = 0
		lock.Whence = 0
		lock.Pid = 0
		if exclusive {
			lock.Type = syscall.F_WRLCK
		} else {
			lock.Type = syscall.F_RDLCK
		}
		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
		// Attempt to obtain an exclusive lock.
		lock := syscall.Flock_t{Type: lockType}
		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			return err
		}

		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
		time.Sleep(flockRetryTimeout)
	}
}
@ -59,29 +59,30 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
	db.lockfile = f

	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	fd := f.Fd()
	var flag uint32 = flagLockFailImmediately
	if exclusive {
		flag |= flagLockExclusive
	}
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}

		var flag uint32 = flagLockFailImmediately
		if exclusive {
			flag |= flagLockExclusive
		}

		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
		// Attempt to obtain an exclusive lock.
		err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{})
		if err == nil {
			return nil
		} else if err != errLockViolation {
			return err
		}

		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
		time.Sleep(flockRetryTimeout)
	}
}

@ -89,7 +90,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
func funlock(db *DB) error {
	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
	db.lockfile.Close()
	os.Remove(db.path+lockExt)
	os.Remove(db.path + lockExt)
	return err
}
49 vendor/github.com/boltdb/bolt/bucket.go → vendor/github.com/coreos/bbolt/bucket.go generated vendored
@ -14,13 +14,6 @@ const (
	MaxValueSize = (1 << 31) - 2
)

const (
	maxUint = ^uint(0)
	minUint = 0
	maxInt  = int(^uint(0) >> 1)
	minInt  = -maxInt - 1
)

const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))

const (
@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket {
	var child = newBucket(b.tx)

	// If unaligned load/stores are broken on this arch and value is
	// unaligned simply clone to an aligned byte array.
	unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0

	if unaligned {
		value = cloneBytes(value)
	}

	// If this is a writable transaction then we need to copy the bucket entry.
	// Read-only transactions can point directly at the mmap entry.
	if b.tx.writable {
	if b.tx.writable && !unaligned {
		child.bucket = &bucket{}
		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
	} else {
@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
	if bytes.Equal(key, k) {
		if (flags & bucketLeafFlag) != 0 {
			return nil, ErrBucketExists
		} else {
			return nil, ErrIncompatibleValue
		}
		return nil, ErrIncompatibleValue
	}

	// Create empty, inline bucket.
@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error {

	// Move cursor to correct position.
	c := b.Cursor()
	_, _, flags := c.seek(key)
	k, _, flags := c.seek(key)

	// Return nil if the key doesn't exist.
	if !bytes.Equal(key, k) {
		return nil
	}

	// Return an error if there is already existing bucket value.
	if (flags & bucketLeafFlag) != 0 {
@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error {
	return nil
}

// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }

// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Materialize the root node if it hasn't been already so that the
	// bucket will be saved during commit.
	if b.rootNode == nil {
		_ = b.node(b.root, nil)
	}

	// Increment and return the sequence.
	b.bucket.sequence = v
	return nil
}

// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
	if b.tx.db == nil {
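The bucket.go hunk above adds `Bucket.Sequence` and `Bucket.SetSequence`; a minimal sketch of pinning a bucket's autoincrement counter, for example when restoring data (the `db` handle and bucket name are illustrative assumptions):

```go
// Pin the sequence counter with SetSequence, then read it back.
err := db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("USERS"))
	if err != nil {
		return err
	}
	if err := b.SetSequence(42); err != nil { // e.g. restoring from a dump
		return err
	}
	fmt.Println("sequence is now", b.Sequence()) // 42; the next NextSequence returns 43
	return nil
})
```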
0 vendor/github.com/boltdb/bolt/cursor.go → vendor/github.com/coreos/bbolt/cursor.go generated vendored
211 vendor/github.com/boltdb/bolt/db.go → vendor/github.com/coreos/bbolt/db.go generated vendored
@ -7,8 +7,7 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
@ -23,6 +22,8 @@ const version = 2
|
||||
// Represents a marker value to indicate that a file is a Bolt DB.
|
||||
const magic uint32 = 0xED0CDAED
|
||||
|
||||
const pgidNoFreelist pgid = 0xffffffffffffffff
|
||||
|
||||
// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
|
||||
// syncing changes to a file. This is required as some operating systems,
|
||||
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
|
||||
@ -39,6 +40,9 @@ const (
|
||||
// default page size for db is set to the OS page size.
|
||||
var defaultPageSize = os.Getpagesize()
|
||||
|
||||
// The time elapsed between consecutive file locking attempts.
|
||||
const flockRetryTimeout = 50 * time.Millisecond
|
||||
|
||||
// DB represents a collection of buckets persisted to a file on disk.
|
||||
// All data access is performed through transactions which can be obtained through the DB.
|
||||
// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
|
||||
@ -61,6 +65,11 @@ type DB struct {
|
||||
// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
|
||||
NoSync bool
|
||||
|
||||
// When true, skips syncing freelist to disk. This improves the database
|
||||
// write performance under normal operation, but requires a full database
|
||||
// re-sync during recovery.
|
||||
NoFreelistSync bool
|
||||
|
||||
// When true, skips the truncate call when growing the database.
|
||||
// Setting this to true is only safe on non-ext3/ext4 systems.
|
||||
// Skipping truncation avoids preallocation of hard drive space and
|
||||
@ -107,9 +116,11 @@ type DB struct {
|
||||
opened bool
|
||||
rwtx *Tx
|
||||
txs []*Tx
|
||||
freelist *freelist
|
||||
stats Stats
|
||||
|
||||
freelist *freelist
|
||||
freelistLoad sync.Once
|
||||
|
||||
pagePool sync.Pool
|
||||
|
||||
batchMu sync.Mutex
|
||||
@ -148,14 +159,17 @@ func (db *DB) String() string {
|
||||
// If the file does not exist then it will be created automatically.
|
||||
// Passing in nil options will cause Bolt to open the database with the default options.
|
||||
func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
||||
var db = &DB{opened: true}
|
||||
|
||||
db := &DB{
|
||||
opened: true,
|
||||
}
|
||||
// Set default options if no options are provided.
|
||||
if options == nil {
|
||||
options = DefaultOptions
|
||||
}
|
||||
db.NoSync = options.NoSync
|
||||
db.NoGrowSync = options.NoGrowSync
|
||||
db.MmapFlags = options.MmapFlags
|
||||
db.NoFreelistSync = options.NoFreelistSync
|
||||
|
||||
// Set default values for later DB operations.
|
||||
db.MaxBatchSize = DefaultMaxBatchSize
|
||||
@ -184,6 +198,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
||||
// The database file is locked using the shared lock (more than one process may
|
||||
// hold a lock at the same time) otherwise (options.ReadOnly is set).
|
||||
if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
|
||||
db.lockfile = nil // make 'unused' happy. TODO: rework locks
|
||||
_ = db.close()
|
||||
return nil, err
|
||||
}
|
||||
@ -191,6 +206,11 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
||||
// Default values for test hooks
|
||||
db.ops.writeAt = db.file.WriteAt
|
||||
|
||||
if db.pageSize = options.PageSize; db.pageSize == 0 {
|
||||
// Set the default page size to the OS page size.
|
||||
db.pageSize = defaultPageSize
|
||||
}
|
||||
|
||||
// Initialize the database if it doesn't exist.
|
||||
if info, err := db.file.Stat(); err != nil {
|
||||
return nil, err
|
||||
@ -202,20 +222,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
||||
} else {
|
||||
// Read the first meta page to determine the page size.
|
||||
var buf [0x1000]byte
|
||||
if _, err := db.file.ReadAt(buf[:], 0); err == nil {
|
||||
m := db.pageInBuffer(buf[:], 0).meta()
|
||||
if err := m.validate(); err != nil {
|
||||
// If we can't read the page size, we can assume it's the same
|
||||
// as the OS -- since that's how the page size was chosen in the
|
||||
// first place.
|
||||
//
|
||||
// If the first page is invalid and this OS uses a different
|
||||
// page size than what the database was created with then we
|
||||
// are out of luck and cannot access the database.
|
||||
db.pageSize = os.Getpagesize()
|
||||
} else {
|
||||
// If we can't read the page size, but can read a page, assume
|
||||
// it's the same as the OS or one given -- since that's how the
|
||||
// page size was chosen in the first place.
|
||||
//
|
||||
// If the first page is invalid and this OS uses a different
|
||||
// page size than what the database was created with then we
|
||||
// are out of luck and cannot access the database.
|
||||
//
|
||||
// TODO: scan for next page
|
||||
if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
|
||||
if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
|
||||
db.pageSize = int(m.pageSize)
|
||||
}
|
||||
} else {
|
||||
return nil, ErrInvalid
|
||||
}
|
||||
}
|
||||
|
||||
@ -232,14 +253,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read in the freelist.
|
||||
db.freelist = newFreelist()
|
||||
db.freelist.read(db.page(db.meta().freelist))
|
||||
if db.readOnly {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
db.loadFreelist()
|
||||
|
||||
// Flush freelist when transitioning from no sync to sync so
|
||||
// NoFreelistSync unaware boltdb can open the db later.
|
||||
if !db.NoFreelistSync && !db.hasSyncedFreelist() {
|
||||
tx, err := db.Begin(true)
|
||||
if tx != nil {
|
||||
err = tx.Commit()
|
||||
}
|
||||
if err != nil {
|
||||
_ = db.close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Mark the database as opened and return.
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// loadFreelist reads the freelist if it is synced, or reconstructs it
|
||||
// by scanning the DB if it is not synced. It assumes there are no
|
||||
// concurrent accesses being made to the freelist.
|
||||
func (db *DB) loadFreelist() {
|
||||
db.freelistLoad.Do(func() {
|
||||
db.freelist = newFreelist()
|
||||
if !db.hasSyncedFreelist() {
|
||||
// Reconstruct free list by scanning the DB.
|
||||
db.freelist.readIDs(db.freepages())
|
||||
} else {
|
||||
// Read free list from freelist page.
|
||||
db.freelist.read(db.page(db.meta().freelist))
|
||||
}
|
||||
db.stats.FreePageN = len(db.freelist.ids)
|
||||
})
|
||||
}
|
||||
|
||||
func (db *DB) hasSyncedFreelist() bool {
|
||||
return db.meta().freelist != pgidNoFreelist
|
||||
}
|
||||
|
||||
// mmap opens the underlying memory-mapped file and initializes the meta references.
|
||||
// minsz is the minimum size that the new mmap can be.
|
||||
func (db *DB) mmap(minsz int) error {
|
||||
@ -341,9 +398,6 @@ func (db *DB) mmapSize(size int) (int, error) {
|
||||
|
||||
// init creates a new database file and initializes its meta pages.
|
||||
func (db *DB) init() error {
|
||||
// Set the page size to the OS page size.
|
||||
db.pageSize = os.Getpagesize()
|
||||
|
||||
// Create two meta pages on a buffer.
|
||||
buf := make([]byte, db.pageSize*4)
|
||||
for i := 0; i < 2; i++ {
|
||||
@ -526,21 +580,36 @@ func (db *DB) beginRWTx() (*Tx, error) {
|
||||
t := &Tx{writable: true}
|
||||
t.init(db)
|
||||
db.rwtx = t
|
||||
db.freePages()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Free any pages associated with closed read-only transactions.
|
||||
var minid txid = 0xFFFFFFFFFFFFFFFF
|
||||
for _, t := range db.txs {
|
||||
if t.meta.txid < minid {
|
||||
minid = t.meta.txid
|
||||
}
|
||||
// freePages releases any pages associated with closed read-only transactions.
|
||||
func (db *DB) freePages() {
|
||||
// Free all pending pages prior to earliest open transaction.
|
||||
sort.Sort(txsById(db.txs))
|
||||
minid := txid(0xFFFFFFFFFFFFFFFF)
|
||||
if len(db.txs) > 0 {
|
||||
minid = db.txs[0].meta.txid
|
||||
}
|
||||
if minid > 0 {
|
||||
db.freelist.release(minid - 1)
|
||||
}
|
||||
|
||||
return t, nil
|
||||
// Release unused txid extents.
|
||||
for _, t := range db.txs {
|
||||
db.freelist.releaseRange(minid, t.meta.txid-1)
|
||||
minid = t.meta.txid + 1
|
||||
}
|
||||
db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
|
||||
// Any page both allocated and freed in an extent is safe to release.
|
||||
}
|
||||
|
||||
type txsById []*Tx
|
||||
|
||||
func (t txsById) Len() int { return len(t) }
|
||||
func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
|
||||
func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
|
||||
|
||||
// removeTx removes a transaction from the database.
|
||||
func (db *DB) removeTx(tx *Tx) {
|
||||
// Release the read lock on the mmap.
|
||||
@ -552,7 +621,10 @@ func (db *DB) removeTx(tx *Tx) {
|
||||
// Remove the transaction.
|
||||
for i, t := range db.txs {
|
||||
if t == tx {
|
||||
db.txs = append(db.txs[:i], db.txs[i+1:]...)
|
||||
last := len(db.txs) - 1
|
||||
db.txs[i] = db.txs[last]
|
||||
db.txs[last] = nil
|
||||
db.txs = db.txs[:last]
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -630,11 +702,7 @@ func (db *DB) View(fn func(*Tx) error) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := t.Rollback(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return t.Rollback()
|
||||
}
|
||||
|
||||
// Batch calls fn as part of a batch. It behaves similar to Update,
|
||||
@ -734,9 +802,7 @@ retry:
|
||||
|
||||
// pass success, or bolt internal errors, to all callers
|
||||
for _, c := range b.calls {
|
||||
if c.err != nil {
|
||||
c.err <- err
|
||||
}
|
||||
c.err <- err
|
||||
}
|
||||
break retry
|
||||
}
|
||||
@ -823,7 +889,7 @@ func (db *DB) meta() *meta {
|
||||
}
|
||||
|
||||
// allocate returns a contiguous block of memory starting at a given page.
|
||||
func (db *DB) allocate(count int) (*page, error) {
|
||||
func (db *DB) allocate(txid txid, count int) (*page, error) {
|
||||
// Allocate a temporary buffer for the page.
|
||||
var buf []byte
|
||||
if count == 1 {
|
||||
@ -835,7 +901,7 @@ func (db *DB) allocate(count int) (*page, error) {
|
||||
p.overflow = uint32(count - 1)
|
||||
|
||||
// Use pages from the freelist if they are available.
|
||||
if p.id = db.freelist.allocate(count); p.id != 0 {
|
||||
if p.id = db.freelist.allocate(txid, count); p.id != 0 {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
@ -890,6 +956,38 @@ func (db *DB) IsReadOnly() bool {
|
||||
return db.readOnly
|
||||
}
|
||||
|
||||
func (db *DB) freepages() []pgid {
|
||||
tx, err := db.beginTx()
|
||||
defer func() {
|
||||
err = tx.Rollback()
|
||||
if err != nil {
|
||||
panic("freepages: failed to rollback tx")
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
panic("freepages: failed to open read only tx")
|
||||
}
|
||||
|
||||
reachable := make(map[pgid]*page)
|
||||
nofreed := make(map[pgid]bool)
|
||||
ech := make(chan error)
|
||||
go func() {
|
||||
for e := range ech {
|
||||
panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
|
||||
}
|
||||
}()
|
||||
tx.checkBucket(&tx.root, reachable, nofreed, ech)
|
||||
close(ech)
|
||||
|
||||
var fids []pgid
|
||||
for i := pgid(2); i < db.meta().pgid; i++ {
|
||||
if _, ok := reachable[i]; !ok {
|
||||
fids = append(fids, i)
|
||||
}
|
||||
}
|
||||
return fids
|
||||
}
|
||||
|
||||
// Options represents the options that can be set when opening a database.
|
||||
type Options struct {
|
||||
// Timeout is the amount of time to wait to obtain a file lock.
|
||||
@ -900,6 +998,10 @@ type Options struct {
|
||||
// Sets the DB.NoGrowSync flag before memory mapping the file.
|
||||
NoGrowSync bool
|
||||
|
||||
// Do not sync freelist to disk. This improves the database write performance
|
||||
// under normal operation, but requires a full database re-sync during recovery.
|
||||
NoFreelistSync bool
|
||||
|
||||
// Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
|
||||
// grab a shared lock (UNIX).
|
||||
ReadOnly bool
|
||||
@ -916,6 +1018,14 @@ type Options struct {
|
||||
// If initialMmapSize is smaller than the previous database size,
|
||||
// it takes no effect.
|
||||
InitialMmapSize int
|
||||
|
||||
// PageSize overrides the default OS page size.
|
||||
PageSize int
|
||||
|
||||
// NoSync sets the initial value of DB.NoSync. Normally this can just be
|
||||
// set directly on the DB itself when returned from Open(), but this option
|
||||
// is useful in APIs which expose Options but not the underlying DB.
|
||||
NoSync bool
|
||||
}
|
||||
|
||||
// DefaultOptions represent the options used if nil options are passed into Open().
|
||||
@ -952,15 +1062,11 @@ func (s *Stats) Sub(other *Stats) Stats {
|
||||
diff.PendingPageN = s.PendingPageN
|
||||
diff.FreeAlloc = s.FreeAlloc
|
||||
diff.FreelistInuse = s.FreelistInuse
|
||||
diff.TxN = other.TxN - s.TxN
|
||||
diff.TxN = s.TxN - other.TxN
|
||||
diff.TxStats = s.TxStats.Sub(&other.TxStats)
|
||||
return diff
|
||||
}
|
||||
|
||||
func (s *Stats) add(other *Stats) {
|
||||
s.TxStats.add(&other.TxStats)
|
||||
}
|
||||
|
||||
type Info struct {
|
||||
Data uintptr
|
||||
PageSize int
|
||||
@ -999,7 +1105,8 @@ func (m *meta) copy(dest *meta) {
|
||||
func (m *meta) write(p *page) {
|
||||
if m.root.root >= m.pgid {
|
||||
panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
|
||||
} else if m.freelist >= m.pgid {
|
||||
} else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
|
||||
// TODO: reject pgidNoFreeList if !NoFreelistSync
|
||||
panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
|
||||
}
|
||||
|
||||
@ -1026,11 +1133,3 @@ func _assert(condition bool, msg string, v ...interface{}) {
|
||||
panic(fmt.Sprintf("assertion failed: "+msg, v...))
|
||||
}
|
||||
}
|
||||
|
||||
func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
|
||||
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
|
||||
|
||||
func printstack() {
|
||||
stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
|
||||
fmt.Fprintln(os.Stderr, stack)
|
||||
}
|
0 vendor/github.com/boltdb/bolt/doc.go → vendor/github.com/coreos/bbolt/doc.go generated vendored
0 vendor/github.com/boltdb/bolt/errors.go → vendor/github.com/coreos/bbolt/errors.go generated vendored
167 vendor/github.com/boltdb/bolt/freelist.go → vendor/github.com/coreos/bbolt/freelist.go generated vendored
@ -6,25 +6,40 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// txPending holds a list of pgids and corresponding allocation txns
|
||||
// that are pending to be freed.
|
||||
type txPending struct {
|
||||
ids []pgid
|
||||
alloctx []txid // txids allocating the ids
|
||||
lastReleaseBegin txid // beginning txid of last matching releaseRange
|
||||
}
|
||||
|
||||
// freelist represents a list of all pages that are available for allocation.
|
||||
// It also tracks pages that have been freed but are still in use by open transactions.
|
||||
type freelist struct {
|
||||
ids []pgid // all free and available free page ids.
|
||||
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
|
||||
cache map[pgid]bool // fast lookup of all free and pending page ids.
|
||||
ids []pgid // all free and available free page ids.
|
||||
allocs map[pgid]txid // mapping of txid that allocated a pgid.
|
||||
pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
|
||||
cache map[pgid]bool // fast lookup of all free and pending page ids.
|
||||
}
|
||||
|
||||
// newFreelist returns an empty, initialized freelist.
|
||||
func newFreelist() *freelist {
|
||||
return &freelist{
|
||||
pending: make(map[txid][]pgid),
|
||||
allocs: make(map[pgid]txid),
|
||||
pending: make(map[txid]*txPending),
|
||||
cache: make(map[pgid]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// size returns the size of the page after serialization.
|
||||
func (f *freelist) size() int {
|
||||
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
|
||||
n := f.count()
|
||||
if n >= 0xFFFF {
|
||||
// The first element will be used to store the count. See freelist.write.
|
||||
n++
|
||||
}
|
||||
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
|
||||
}
|
||||
|
||||
// count returns count of pages on the freelist
|
||||
@ -40,27 +55,26 @@ func (f *freelist) free_count() int {
|
||||
// pending_count returns count of pending pages
|
||||
func (f *freelist) pending_count() int {
|
||||
var count int
|
||||
for _, list := range f.pending {
|
||||
count += len(list)
|
||||
for _, txp := range f.pending {
|
||||
count += len(txp.ids)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// all returns a list of all free ids and all pending ids in one sorted list.
|
||||
func (f *freelist) all() []pgid {
|
||||
m := make(pgids, 0)
|
||||
|
||||
for _, list := range f.pending {
|
||||
m = append(m, list...)
|
||||
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
|
||||
// f.count returns the minimum length required for dst.
|
||||
func (f *freelist) copyall(dst []pgid) {
|
||||
m := make(pgids, 0, f.pending_count())
|
||||
for _, txp := range f.pending {
|
||||
m = append(m, txp.ids...)
|
||||
}
|
||||
|
||||
sort.Sort(m)
|
||||
return pgids(f.ids).merge(m)
|
||||
mergepgids(dst, f.ids, m)
|
||||
}
|
||||
|
||||
// allocate returns the starting page id of a contiguous list of pages of a given size.
|
||||
// If a contiguous block cannot be found then 0 is returned.
|
||||
func (f *freelist) allocate(n int) pgid {
|
||||
func (f *freelist) allocate(txid txid, n int) pgid {
|
||||
if len(f.ids) == 0 {
|
||||
return 0
|
||||
}
|
||||
@ -93,7 +107,7 @@ func (f *freelist) allocate(n int) pgid {
|
||||
for i := pgid(0); i < pgid(n); i++ {
|
||||
delete(f.cache, initial+i)
|
||||
}
|
||||
|
||||
f.allocs[initial] = txid
|
||||
return initial
|
||||
}
|
||||
|
||||
@ -110,28 +124,73 @@ func (f *freelist) free(txid txid, p *page) {
|
||||
}
|
||||
|
||||
// Free page and all its overflow pages.
|
||||
var ids = f.pending[txid]
|
||||
txp := f.pending[txid]
|
||||
if txp == nil {
|
||||
txp = &txPending{}
|
||||
f.pending[txid] = txp
|
||||
}
|
||||
allocTxid, ok := f.allocs[p.id]
|
||||
if ok {
|
||||
delete(f.allocs, p.id)
|
||||
} else if (p.flags & freelistPageFlag) != 0 {
|
||||
// Freelist is always allocated by prior tx.
|
||||
allocTxid = txid - 1
|
||||
}
|
||||
|
||||
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
|
||||
// Verify that page is not already free.
|
||||
if f.cache[id] {
|
||||
panic(fmt.Sprintf("page %d already freed", id))
|
||||
}
|
||||
|
||||
// Add to the freelist and cache.
|
||||
ids = append(ids, id)
|
||||
txp.ids = append(txp.ids, id)
|
||||
txp.alloctx = append(txp.alloctx, allocTxid)
|
||||
f.cache[id] = true
|
||||
}
|
||||
f.pending[txid] = ids
|
||||
}
|
||||
|
||||
// release moves all page ids for a transaction id (or older) to the freelist.
|
||||
func (f *freelist) release(txid txid) {
|
||||
m := make(pgids, 0)
|
||||
for tid, ids := range f.pending {
|
||||
for tid, txp := range f.pending {
|
||||
if tid <= txid {
|
||||
// Move transaction's pending pages to the available freelist.
|
||||
// Don't remove from the cache since the page is still free.
|
||||
m = append(m, ids...)
|
||||
m = append(m, txp.ids...)
|
||||
delete(f.pending, tid)
|
||||
}
|
||||
}
|
||||
sort.Sort(m)
|
||||
f.ids = pgids(f.ids).merge(m)
|
||||
}
|
||||
|
||||
// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
|
||||
func (f *freelist) releaseRange(begin, end txid) {
|
||||
if begin > end {
|
||||
return
|
||||
}
|
||||
var m pgids
|
||||
for tid, txp := range f.pending {
|
||||
if tid < begin || tid > end {
|
||||
continue
|
||||
}
|
||||
// Don't recompute freed pages if ranges haven't updated.
|
||||
if txp.lastReleaseBegin == begin {
|
||||
continue
|
||||
}
|
||||
for i := 0; i < len(txp.ids); i++ {
|
||||
if atx := txp.alloctx[i]; atx < begin || atx > end {
|
||||
continue
|
||||
}
|
||||
m = append(m, txp.ids[i])
|
||||
txp.ids[i] = txp.ids[len(txp.ids)-1]
|
||||
txp.ids = txp.ids[:len(txp.ids)-1]
|
||||
txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
|
||||
txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
|
||||
i--
|
||||
}
|
||||
txp.lastReleaseBegin = begin
|
||||
if len(txp.ids) == 0 {
|
||||
delete(f.pending, tid)
|
||||
}
|
||||
}
|
||||
@ -142,12 +201,29 @@ func (f *freelist) release(txid txid) {
|
||||
// rollback removes the pages from a given pending tx.
|
||||
func (f *freelist) rollback(txid txid) {
|
||||
// Remove page ids from cache.
|
||||
for _, id := range f.pending[txid] {
|
||||
delete(f.cache, id)
|
||||
txp := f.pending[txid]
|
||||
if txp == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pages from pending list.
|
||||
var m pgids
|
||||
for i, pgid := range txp.ids {
|
||||
delete(f.cache, pgid)
|
||||
tx := txp.alloctx[i]
|
||||
if tx == 0 {
|
||||
continue
|
||||
}
|
||||
if tx != txid {
|
||||
// Pending free aborted; restore page back to alloc list.
|
||||
f.allocs[pgid] = tx
|
||||
} else {
|
||||
// Freed page was allocated by this txn; OK to throw away.
|
||||
m = append(m, pgid)
|
||||
}
|
||||
}
|
||||
// Remove pages from pending list and mark as free if allocated by txid.
|
||||
delete(f.pending, txid)
|
||||
sort.Sort(m)
|
||||
f.ids = pgids(f.ids).merge(m)
|
||||
}
|
||||
|
||||
// freed returns whether a given page is in the free list.
|
||||
@ -157,6 +233,9 @@ func (f *freelist) freed(pgid pgid) bool {
|
||||
|
||||
// read initializes the freelist from a freelist page.
|
||||
func (f *freelist) read(p *page) {
|
||||
if (p.flags & freelistPageFlag) == 0 {
|
||||
panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
|
||||
}
|
||||
// If the page.count is at the max uint16 value (64k) then it's considered
|
||||
// an overflow and the size of the freelist is stored as the first element.
|
||||
idx, count := 0, int(p.count)
|
||||
@ -169,7 +248,7 @@ func (f *freelist) read(p *page) {
|
||||
if count == 0 {
|
||||
f.ids = nil
|
||||
} else {
|
||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
|
||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
|
||||
f.ids = make([]pgid, len(ids))
|
||||
copy(f.ids, ids)
|
||||
|
||||
@ -181,27 +260,33 @@ func (f *freelist) read(p *page) {
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
// read initializes the freelist from a given list of ids.
|
||||
func (f *freelist) readIDs(ids []pgid) {
|
||||
f.ids = ids
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
// write writes the page ids onto a freelist page. All free and pending ids are
|
||||
// saved to disk since in the event of a program crash, all pending ids will
|
||||
// become free.
|
||||
func (f *freelist) write(p *page) error {
|
||||
// Combine the old free pgids and pgids waiting on an open transaction.
|
||||
ids := f.all()
|
||||
|
||||
// Update the header flag.
|
||||
p.flags |= freelistPageFlag
|
||||
|
||||
// The page.count can only hold up to 64k elements so if we overflow that
|
||||
// number then we handle it by putting the size in the first element.
|
||||
if len(ids) == 0 {
|
||||
p.count = uint16(len(ids))
|
||||
} else if len(ids) < 0xFFFF {
|
||||
p.count = uint16(len(ids))
|
||||
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
|
||||
lenids := f.count()
|
||||
if lenids == 0 {
|
||||
p.count = uint16(lenids)
|
||||
} else if lenids < 0xFFFF {
|
||||
p.count = uint16(lenids)
|
||||
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
|
||||
} else {
|
||||
p.count = 0xFFFF
|
||||
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
|
||||
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
|
||||
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
|
||||
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -213,8 +298,8 @@ func (f *freelist) reload(p *page) {
|
||||
|
||||
// Build a cache of only pending pages.
|
||||
pcache := make(map[pgid]bool)
|
||||
for _, pendingIDs := range f.pending {
|
||||
for _, pendingID := range pendingIDs {
|
||||
for _, txp := range f.pending {
|
||||
for _, pendingID := range txp.ids {
|
||||
pcache[pendingID] = true
|
||||
}
|
||||
}
|
||||
@ -236,12 +321,12 @@ func (f *freelist) reload(p *page) {
|
||||
|
||||
// reindex rebuilds the free cache based on available and pending free lists.
|
||||
func (f *freelist) reindex() {
|
||||
f.cache = make(map[pgid]bool)
|
||||
f.cache = make(map[pgid]bool, len(f.ids))
|
||||
for _, id := range f.ids {
|
||||
f.cache[id] = true
|
||||
}
|
||||
for _, pendingIDs := range f.pending {
|
||||
for _, pendingID := range pendingIDs {
|
||||
for _, txp := range f.pending {
|
||||
for _, pendingID := range txp.ids {
|
||||
f.cache[pendingID] = true
|
||||
}
|
||||
}
|
2 vendor/github.com/boltdb/bolt/node.go → vendor/github.com/coreos/bbolt/node.go generated vendored
@ -365,7 +365,7 @@ func (n *node) spill() error {
	}

	// Allocate contiguous space for the node.
	p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
	p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
	if err != nil {
		return err
	}
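The hunk above swaps the old "divide then add one" page count for a ceiling division; a small standalone sketch (illustrative values) of why that matters when the node size is an exact multiple of the page size:

```go
package main

import "fmt"

func main() {
	size, pageSize := 4096, 4096

	oldPages := (size / pageSize) + 1            // 2: over-allocates on exact multiples
	newPages := (size + pageSize - 1) / pageSize // 1: true ceiling of size/pageSize

	fmt.Println(oldPages, newPages) // 2 1
}
```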
31 vendor/github.com/boltdb/bolt/page.go → vendor/github.com/coreos/bbolt/page.go generated vendored
@ -145,12 +145,33 @@ func (a pgids) merge(b pgids) pgids {
	// Return the opposite slice if one is nil.
	if len(a) == 0 {
		return b
	} else if len(b) == 0 {
	}
	if len(b) == 0 {
		return a
	}
	merged := make(pgids, len(a)+len(b))
	mergepgids(merged, a, b)
	return merged
}

	// Create a list to hold all elements from both lists.
	merged := make(pgids, 0, len(a)+len(b))
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
	if len(dst) < len(a)+len(b) {
		panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
	}
	// Copy in the opposite slice if one is nil.
	if len(a) == 0 {
		copy(dst, b)
		return
	}
	if len(b) == 0 {
		copy(dst, a)
		return
	}

	// Merged will hold all elements from both lists.
	merged := dst[:0]

	// Assign lead to the slice with a lower starting value, follow to the higher value.
	lead, follow := a, b
@ -172,7 +193,5 @@ func (a pgids) merge(b pgids) pgids {
	}

	// Append what's left in follow.
	merged = append(merged, follow...)

	return merged
	_ = append(merged, follow...)
}
83 vendor/github.com/boltdb/bolt/tx.go → vendor/github.com/coreos/bbolt/tx.go generated vendored
@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error {
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
	return tx.root.ForEach(func(k, v []byte) error {
		if err := fn(k, tx.root.Bucket(k)); err != nil {
			return err
		}
		return nil
		return fn(k, tx.root.Bucket(k))
	})
}

@ -169,28 +166,18 @@ func (tx *Tx) Commit() error {
	// Free the old root bucket.
	tx.meta.root.root = tx.root.root

	opgid := tx.meta.pgid

	// Free the freelist and allocate new pages for it. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	// Free the old freelist because commit writes out a fresh freelist.
	if tx.meta.freelist != pgidNoFreelist {
		tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id

	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
	if !tx.db.NoFreelistSync {
		err := tx.commitFreelist()
		if err != nil {
			return err
		}
	} else {
		tx.meta.freelist = pgidNoFreelist
	}

	// Write dirty pages to disk.
@ -235,6 +222,31 @@ func (tx *Tx) Commit() error {
	return nil
}

func (tx *Tx) commitFreelist() error {
	// Allocate new pages for the new free list. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	opgid := tx.meta.pgid
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id
	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
			return err
		}
	}

	return nil
}

// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
@ -305,7 +317,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	if err != nil {
		return 0, err
	}
	defer func() { _ = f.Close() }()
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()

	// Generate a meta page. We use the same page data for both meta pages.
	buf := make([]byte, tx.db.pageSize)
@ -333,7 +349,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	}

	// Move past the meta pages in the file.
	if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
	if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
		return n, fmt.Errorf("seek: %s", err)
	}

@ -344,7 +360,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
		return n, err
	}

	return n, f.Close()
	return n, nil
}

// CopyFile copies the entire database to file at the given path.
@ -379,9 +395,14 @@ func (tx *Tx) Check() <-chan error {
}

func (tx *Tx) check(ch chan error) {
	// Force loading free list if opened in ReadOnly mode.
	tx.db.loadFreelist()

	// Check if any pages are double freed.
	freed := make(map[pgid]bool)
	for _, id := range tx.db.freelist.all() {
	all := make([]pgid, tx.db.freelist.count())
	tx.db.freelist.copyall(all)
	for _, id := range all {
		if freed[id] {
			ch <- fmt.Errorf("page %d: already freed", id)
		}
@ -392,8 +413,10 @@ func (tx *Tx) check(ch chan error) {
	reachable := make(map[pgid]*page)
	reachable[0] = tx.page(0) // meta0
	reachable[1] = tx.page(1) // meta1
	for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
		reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
	if tx.meta.freelist != pgidNoFreelist {
		for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
			reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
		}
	}

	// Recursively check buckets.
@ -451,7 +474,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo

// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
	p, err := tx.db.allocate(count)
	p, err := tx.db.allocate(tx.meta.txid, count)
	if err != nil {
		return nil, err
	}
@ -460,7 +483,7 @@ func (tx *Tx) allocate(count int) (*page, error) {
	tx.pages[p.id] = p

	// Update statistics.
	tx.stats.PageCount++
	tx.stats.PageCount += count
	tx.stats.PageAlloc += count * tx.db.pageSize

	return p, nil
5 vendor/github.com/coreos/etcd/auth/BUILD (generated, vendored)
@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"jwt.go",
"range_perm_cache.go",
"simple_token.go",
"store.go",
@@ -14,10 +15,14 @@ go_library(
"//vendor/github.com/coreos/etcd/auth/authpb:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/adt:go_default_library",
"//vendor/github.com/coreos/pkg/capnslog:go_default_library",
"//vendor/github.com/dgrijalva/jwt-go:go_default_library",
"//vendor/golang.org/x/crypto/bcrypt:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc/credentials:go_default_library",
"//vendor/google.golang.org/grpc/metadata:go_default_library",
"//vendor/google.golang.org/grpc/peer:go_default_library",
],
)

2 vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go (generated, vendored)
@@ -803,7 +803,7 @@ func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }

var fileDescriptorAuth = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
137 vendor/github.com/coreos/etcd/auth/jwt.go (generated, vendored, Normal file)
@ -0,0 +1,137 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"io/ioutil"
|
||||
|
||||
jwt "github.com/dgrijalva/jwt-go"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type tokenJWT struct {
|
||||
signMethod string
|
||||
signKey *rsa.PrivateKey
|
||||
verifyKey *rsa.PublicKey
|
||||
}
|
||||
|
||||
func (t *tokenJWT) enable() {}
|
||||
func (t *tokenJWT) disable() {}
|
||||
func (t *tokenJWT) invalidateUser(string) {}
|
||||
func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
|
||||
|
||||
func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
|
||||
// rev isn't used in JWT, it is only used in simple token
|
||||
var (
|
||||
username string
|
||||
revision uint64
|
||||
)
|
||||
|
||||
parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
|
||||
return t.verifyKey, nil
|
||||
})
|
||||
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
if !parsed.Valid {
|
||||
plog.Warningf("invalid jwt token: %s", token)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
claims := parsed.Claims.(jwt.MapClaims)
|
||||
|
||||
username = claims["username"].(string)
|
||||
revision = uint64(claims["revision"].(float64))
|
||||
default:
|
||||
plog.Warningf("failed to parse jwt token: %s", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return &AuthInfo{Username: username, Revision: revision}, true
|
||||
}
|
||||
|
||||
func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
|
||||
// Future work: let a jwt token include permission information would be useful for
|
||||
// permission checking in proxy side.
|
||||
tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod),
|
||||
jwt.MapClaims{
|
||||
"username": username,
|
||||
"revision": revision,
|
||||
})
|
||||
|
||||
token, err := tk.SignedString(t.signKey)
|
||||
if err != nil {
|
||||
plog.Debugf("failed to sign jwt token: %s", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
plog.Debugf("jwt token: %s", token)
|
||||
|
||||
return token, err
|
||||
}
|
||||
|
||||
func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) {
|
||||
for k, v := range opts {
|
||||
switch k {
|
||||
case "sign-method":
|
||||
jwtSignMethod = v
|
||||
case "pub-key":
|
||||
jwtPubKeyPath = v
|
||||
case "priv-key":
|
||||
jwtPrivKeyPath = v
|
||||
default:
|
||||
plog.Errorf("unknown token specific option: %s", k)
|
||||
return "", "", "", ErrInvalidAuthOpts
|
||||
}
|
||||
}
|
||||
|
||||
return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil
|
||||
}
|
||||
|
||||
func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) {
|
||||
jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidAuthOpts
|
||||
}
|
||||
|
||||
t := &tokenJWT{}
|
||||
|
||||
t.signMethod = jwtSignMethod
|
||||
|
||||
verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signBytes, err := ioutil.ReadFile(jwtPrivKeyPath)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
180 vendor/github.com/coreos/etcd/auth/range_perm_cache.go (generated, vendored)
@ -15,93 +15,11 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/etcd/auth/authpb"
|
||||
"github.com/coreos/etcd/mvcc/backend"
|
||||
"github.com/coreos/etcd/pkg/adt"
|
||||
)
|
||||
|
||||
// isSubset returns true if a is a subset of b.
|
||||
// If a is a prefix of b, then a is a subset of b.
|
||||
// Given intervals [a1,a2) and [b1,b2), is
|
||||
// the a interval a subset of b?
|
||||
func isSubset(a, b *rangePerm) bool {
|
||||
switch {
|
||||
case len(a.end) == 0 && len(b.end) == 0:
|
||||
// a, b are both keys
|
||||
return bytes.Equal(a.begin, b.begin)
|
||||
case len(b.end) == 0:
|
||||
// b is a key, a is a range
|
||||
return false
|
||||
case len(a.end) == 0:
|
||||
// a is a key, b is a range. need b1 <= a1 and a1 < b2
|
||||
return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0
|
||||
default:
|
||||
// both are ranges. need b1 <= a1 and a2 <= b2
|
||||
return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0
|
||||
}
|
||||
}
|
||||
|
||||
func isRangeEqual(a, b *rangePerm) bool {
|
||||
return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end)
|
||||
}
|
||||
|
||||
// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms.
|
||||
// If there are equal ranges, removeSubsetRangePerms only keeps one of them.
|
||||
// It returns a sorted rangePerm slice.
|
||||
func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) {
|
||||
sort.Sort(RangePermSliceByBegin(perms))
|
||||
var prev *rangePerm
|
||||
for i := range perms {
|
||||
if i == 0 {
|
||||
prev = perms[i]
|
||||
newp = append(newp, perms[i])
|
||||
continue
|
||||
}
|
||||
if isRangeEqual(perms[i], prev) {
|
||||
continue
|
||||
}
|
||||
if isSubset(perms[i], prev) {
|
||||
continue
|
||||
}
|
||||
if isSubset(prev, perms[i]) {
|
||||
prev = perms[i]
|
||||
newp[len(newp)-1] = perms[i]
|
||||
continue
|
||||
}
|
||||
prev = perms[i]
|
||||
newp = append(newp, perms[i])
|
||||
}
|
||||
return newp
|
||||
}
|
||||
|
||||
// mergeRangePerms merges adjacent rangePerms.
|
||||
func mergeRangePerms(perms []*rangePerm) []*rangePerm {
|
||||
var merged []*rangePerm
|
||||
perms = removeSubsetRangePerms(perms)
|
||||
|
||||
i := 0
|
||||
for i < len(perms) {
|
||||
begin, next := i, i
|
||||
for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 {
|
||||
next++
|
||||
}
|
||||
// don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty.
|
||||
if next != begin && len(perms[next].end) > 0 {
|
||||
merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end})
|
||||
} else {
|
||||
merged = append(merged, perms[begin])
|
||||
if next != begin {
|
||||
merged = append(merged, perms[next])
|
||||
}
|
||||
}
|
||||
i = next + 1
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
|
||||
func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
|
||||
user := getUser(tx, userName)
|
||||
if user == nil {
|
||||
@ -109,7 +27,8 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission
|
||||
return nil
|
||||
}
|
||||
|
||||
var readPerms, writePerms []*rangePerm
|
||||
readPerms := &adt.IntervalTree{}
|
||||
writePerms := &adt.IntervalTree{}
|
||||
|
||||
for _, roleName := range user.Roles {
|
||||
role := getRole(tx, roleName)
|
||||
@ -118,48 +37,66 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission
|
||||
}
|
||||
|
||||
for _, perm := range role.KeyPermission {
|
||||
rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd}
|
||||
var ivl adt.Interval
|
||||
var rangeEnd []byte
|
||||
|
||||
if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
|
||||
rangeEnd = perm.RangeEnd
|
||||
}
|
||||
|
||||
if len(perm.RangeEnd) != 0 {
|
||||
ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
|
||||
} else {
|
||||
ivl = adt.NewBytesAffinePoint(perm.Key)
|
||||
}
|
||||
|
||||
switch perm.PermType {
|
||||
case authpb.READWRITE:
|
||||
readPerms = append(readPerms, rp)
|
||||
writePerms = append(writePerms, rp)
|
||||
readPerms.Insert(ivl, struct{}{})
|
||||
writePerms.Insert(ivl, struct{}{})
|
||||
|
||||
case authpb.READ:
|
||||
readPerms = append(readPerms, rp)
|
||||
readPerms.Insert(ivl, struct{}{})
|
||||
|
||||
case authpb.WRITE:
|
||||
writePerms = append(writePerms, rp)
|
||||
writePerms.Insert(ivl, struct{}{})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &unifiedRangePermissions{
|
||||
readPerms: mergeRangePerms(readPerms),
|
||||
writePerms: mergeRangePerms(writePerms),
|
||||
readPerms: readPerms,
|
||||
writePerms: writePerms,
|
||||
}
|
||||
}
|
||||
|
||||
func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
|
||||
var tocheck []*rangePerm
|
||||
func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
|
||||
if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
|
||||
rangeEnd = nil
|
||||
}
|
||||
|
||||
ivl := adt.NewBytesAffineInterval(key, rangeEnd)
|
||||
switch permtyp {
|
||||
case authpb.READ:
|
||||
tocheck = cachedPerms.readPerms
|
||||
return cachedPerms.readPerms.Contains(ivl)
|
||||
case authpb.WRITE:
|
||||
tocheck = cachedPerms.writePerms
|
||||
return cachedPerms.writePerms.Contains(ivl)
|
||||
default:
|
||||
plog.Panicf("unknown auth type: %v", permtyp)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
requiredPerm := &rangePerm{begin: key, end: rangeEnd}
|
||||
|
||||
for _, perm := range tocheck {
|
||||
if isSubset(requiredPerm, perm) {
|
||||
return true
|
||||
}
|
||||
func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
|
||||
pt := adt.NewBytesAffinePoint(key)
|
||||
switch permtyp {
|
||||
case authpb.READ:
|
||||
return cachedPerms.readPerms.Intersects(pt)
|
||||
case authpb.WRITE:
|
||||
return cachedPerms.writePerms.Intersects(pt)
|
||||
default:
|
||||
plog.Panicf("unknown auth type: %v", permtyp)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@ -175,7 +112,11 @@ func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key
|
||||
as.rangePermCache[userName] = perms
|
||||
}
|
||||
|
||||
return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp)
|
||||
if len(rangeEnd) == 0 {
|
||||
return checkKeyPoint(as.rangePermCache[userName], key, permtyp)
|
||||
}
|
||||
|
||||
return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp)
|
||||
}
|
||||
|
||||
func (as *authStore) clearCachedPerm() {
|
||||
@ -187,35 +128,6 @@ func (as *authStore) invalidateCachedPerm(userName string) {
|
||||
}
|
||||
|
||||
type unifiedRangePermissions struct {
|
||||
// readPerms[i] and readPerms[j] (i != j) don't overlap
|
||||
readPerms []*rangePerm
|
||||
// writePerms[i] and writePerms[j] (i != j) don't overlap, too
|
||||
writePerms []*rangePerm
|
||||
}
|
||||
|
||||
type rangePerm struct {
|
||||
begin, end []byte
|
||||
}
|
||||
|
||||
type RangePermSliceByBegin []*rangePerm
|
||||
|
||||
func (slice RangePermSliceByBegin) Len() int {
|
||||
return len(slice)
|
||||
}
|
||||
|
||||
func (slice RangePermSliceByBegin) Less(i, j int) bool {
|
||||
switch bytes.Compare(slice[i].begin, slice[j].begin) {
|
||||
case 0: // begin(i) == begin(j)
|
||||
return bytes.Compare(slice[i].end, slice[j].end) == -1
|
||||
|
||||
case -1: // begin(i) < begin(j)
|
||||
return true
|
||||
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (slice RangePermSliceByBegin) Swap(i, j int) {
|
||||
slice[i], slice[j] = slice[j], slice[i]
|
||||
readPerms *adt.IntervalTree
|
||||
writePerms *adt.IntervalTree
|
||||
}
|
||||
|
129 vendor/github.com/coreos/etcd/auth/simple_token.go (generated, vendored)
@ -19,10 +19,14 @@ package auth
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -90,24 +94,14 @@ func (tm *simpleTokenTTLKeeper) run() {
|
||||
}
|
||||
}
|
||||
|
||||
func (as *authStore) enable() {
|
||||
delf := func(tk string) {
|
||||
if username, ok := as.simpleTokens[tk]; ok {
|
||||
plog.Infof("deleting token %s for user %s", tk, username)
|
||||
delete(as.simpleTokens, tk)
|
||||
}
|
||||
}
|
||||
as.simpleTokenKeeper = &simpleTokenTTLKeeper{
|
||||
tokens: make(map[string]time.Time),
|
||||
donec: make(chan struct{}),
|
||||
stopc: make(chan struct{}),
|
||||
deleteTokenFunc: delf,
|
||||
mu: &as.simpleTokensMu,
|
||||
}
|
||||
go as.simpleTokenKeeper.run()
|
||||
type tokenSimple struct {
|
||||
indexWaiter func(uint64) <-chan struct{}
|
||||
simpleTokenKeeper *simpleTokenTTLKeeper
|
||||
simpleTokensMu sync.Mutex
|
||||
simpleTokens map[string]string // token -> username
|
||||
}
|
||||
|
||||
func (as *authStore) GenSimpleToken() (string, error) {
|
||||
func (t *tokenSimple) genTokenPrefix() (string, error) {
|
||||
ret := make([]byte, defaultSimpleTokenLength)
|
||||
|
||||
for i := 0; i < defaultSimpleTokenLength; i++ {
|
||||
@ -122,28 +116,105 @@ func (as *authStore) GenSimpleToken() (string, error) {
|
||||
return string(ret), nil
|
||||
}
|
||||
|
||||
func (as *authStore) assignSimpleTokenToUser(username, token string) {
|
||||
as.simpleTokensMu.Lock()
|
||||
_, ok := as.simpleTokens[token]
|
||||
func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
|
||||
t.simpleTokensMu.Lock()
|
||||
_, ok := t.simpleTokens[token]
|
||||
if ok {
|
||||
plog.Panicf("token %s is alredy used", token)
|
||||
}
|
||||
|
||||
as.simpleTokens[token] = username
|
||||
as.simpleTokenKeeper.addSimpleToken(token)
|
||||
as.simpleTokensMu.Unlock()
|
||||
t.simpleTokens[token] = username
|
||||
t.simpleTokenKeeper.addSimpleToken(token)
|
||||
t.simpleTokensMu.Unlock()
|
||||
}
|
||||
|
||||
func (as *authStore) invalidateUser(username string) {
|
||||
if as.simpleTokenKeeper == nil {
|
||||
func (t *tokenSimple) invalidateUser(username string) {
|
||||
if t.simpleTokenKeeper == nil {
|
||||
return
|
||||
}
|
||||
as.simpleTokensMu.Lock()
|
||||
for token, name := range as.simpleTokens {
|
||||
t.simpleTokensMu.Lock()
|
||||
for token, name := range t.simpleTokens {
|
||||
if strings.Compare(name, username) == 0 {
|
||||
delete(as.simpleTokens, token)
|
||||
as.simpleTokenKeeper.deleteSimpleToken(token)
|
||||
delete(t.simpleTokens, token)
|
||||
t.simpleTokenKeeper.deleteSimpleToken(token)
|
||||
}
|
||||
}
|
||||
as.simpleTokensMu.Unlock()
|
||||
t.simpleTokensMu.Unlock()
|
||||
}
|
||||
|
||||
func (t *tokenSimple) enable() {
|
||||
delf := func(tk string) {
|
||||
if username, ok := t.simpleTokens[tk]; ok {
|
||||
plog.Infof("deleting token %s for user %s", tk, username)
|
||||
delete(t.simpleTokens, tk)
|
||||
}
|
||||
}
|
||||
t.simpleTokenKeeper = &simpleTokenTTLKeeper{
|
||||
tokens: make(map[string]time.Time),
|
||||
donec: make(chan struct{}),
|
||||
stopc: make(chan struct{}),
|
||||
deleteTokenFunc: delf,
|
||||
mu: &t.simpleTokensMu,
|
||||
}
|
||||
go t.simpleTokenKeeper.run()
|
||||
}
|
||||
|
||||
func (t *tokenSimple) disable() {
|
||||
t.simpleTokensMu.Lock()
|
||||
tk := t.simpleTokenKeeper
|
||||
t.simpleTokenKeeper = nil
|
||||
t.simpleTokens = make(map[string]string) // invalidate all tokens
|
||||
t.simpleTokensMu.Unlock()
|
||||
if tk != nil {
|
||||
tk.stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
|
||||
if !t.isValidSimpleToken(ctx, token) {
|
||||
return nil, false
|
||||
}
|
||||
t.simpleTokensMu.Lock()
|
||||
username, ok := t.simpleTokens[token]
|
||||
if ok && t.simpleTokenKeeper != nil {
|
||||
t.simpleTokenKeeper.resetSimpleToken(token)
|
||||
}
|
||||
t.simpleTokensMu.Unlock()
|
||||
return &AuthInfo{Username: username, Revision: revision}, ok
|
||||
}
|
||||
|
||||
func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
|
||||
// rev isn't used in simple token, it is only used in JWT
|
||||
index := ctx.Value("index").(uint64)
|
||||
simpleToken := ctx.Value("simpleToken").(string)
|
||||
token := fmt.Sprintf("%s.%d", simpleToken, index)
|
||||
t.assignSimpleTokenToUser(username, token)
|
||||
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
|
||||
splitted := strings.Split(token, ".")
|
||||
if len(splitted) != 2 {
|
||||
return false
|
||||
}
|
||||
index, err := strconv.Atoi(splitted[1])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case <-t.indexWaiter(uint64(index)):
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple {
|
||||
return &tokenSimple{
|
||||
simpleTokens: make(map[string]string),
|
||||
indexWaiter: indexWaiter,
|
||||
}
|
||||
}
|
||||
|
260 vendor/github.com/coreos/etcd/auth/store.go (generated, vendored)
@ -18,11 +18,10 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/coreos/etcd/auth/authpb"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
@ -30,7 +29,9 @@ import (
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -60,6 +61,8 @@ var (
|
||||
ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
|
||||
ErrAuthOldRevision = errors.New("auth: revision in header is old")
|
||||
ErrInvalidAuthToken = errors.New("auth: invalid auth token")
|
||||
ErrInvalidAuthOpts = errors.New("auth: invalid auth options")
|
||||
ErrInvalidAuthMgmt = errors.New("auth: invalid auth management")
|
||||
|
||||
// BcryptCost is the algorithm cost / strength for hashing auth passwords
|
||||
BcryptCost = bcrypt.DefaultCost
|
||||
@ -129,10 +132,6 @@ type AuthStore interface {
|
||||
// RoleList gets a list of all roles
|
||||
RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
|
||||
|
||||
// AuthInfoFromToken gets a username from the given Token and current revision number
|
||||
// (The revision number is used for preventing the TOCTOU problem)
|
||||
AuthInfoFromToken(token string) (*AuthInfo, bool)
|
||||
|
||||
// IsPutPermitted checks put permission of the user
|
||||
IsPutPermitted(authInfo *AuthInfo, key []byte) error
|
||||
|
||||
@ -145,8 +144,9 @@ type AuthStore interface {
|
||||
// IsAdminPermitted checks admin permission of the user
|
||||
IsAdminPermitted(authInfo *AuthInfo) error
|
||||
|
||||
// GenSimpleToken produces a simple random string
|
||||
GenSimpleToken() (string, error)
|
||||
// GenTokenPrefix produces a random string in a case of simple token
|
||||
// in a case of JWT, it produces an empty string
|
||||
GenTokenPrefix() (string, error)
|
||||
|
||||
// Revision gets current revision of authStore
|
||||
Revision() uint64
|
||||
@ -159,33 +159,32 @@ type AuthStore interface {
|
||||
|
||||
// AuthInfoFromCtx gets AuthInfo from gRPC's context
|
||||
AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
|
||||
|
||||
// AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context
|
||||
AuthInfoFromTLS(ctx context.Context) *AuthInfo
|
||||
}
|
||||
|
||||
type TokenProvider interface {
|
||||
info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
|
||||
assign(ctx context.Context, username string, revision uint64) (string, error)
|
||||
enable()
|
||||
disable()
|
||||
|
||||
invalidateUser(string)
|
||||
genTokenPrefix() (string, error)
|
||||
}
|
||||
|
||||
type authStore struct {
|
||||
// atomic operations; need 64-bit align, or 32-bit tests will crash
|
||||
revision uint64
|
||||
|
||||
be backend.Backend
|
||||
enabled bool
|
||||
enabledMu sync.RWMutex
|
||||
|
||||
rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
|
||||
|
||||
revision uint64
|
||||
|
||||
// tokenSimple in v3.2+
|
||||
indexWaiter func(uint64) <-chan struct{}
|
||||
simpleTokenKeeper *simpleTokenTTLKeeper
|
||||
simpleTokensMu sync.Mutex
|
||||
simpleTokens map[string]string // token -> username
|
||||
}
|
||||
|
||||
func newDeleterFunc(as *authStore) func(string) {
|
||||
return func(t string) {
|
||||
as.simpleTokensMu.Lock()
|
||||
defer as.simpleTokensMu.Unlock()
|
||||
if username, ok := as.simpleTokens[t]; ok {
|
||||
plog.Infof("deleting token %s for user %s", t, username)
|
||||
delete(as.simpleTokens, t)
|
||||
}
|
||||
}
|
||||
tokenProvider TokenProvider
|
||||
}
|
||||
|
||||
func (as *authStore) AuthEnable() error {
|
||||
@ -215,11 +214,11 @@ func (as *authStore) AuthEnable() error {
|
||||
tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
|
||||
|
||||
as.enabled = true
|
||||
as.enable()
|
||||
as.tokenProvider.enable()
|
||||
|
||||
as.rangePermCache = make(map[string]*unifiedRangePermissions)
|
||||
|
||||
as.revision = getRevision(tx)
|
||||
as.setRevision(getRevision(tx))
|
||||
|
||||
plog.Noticef("Authentication enabled")
|
||||
|
||||
@ -241,15 +240,7 @@ func (as *authStore) AuthDisable() {
|
||||
b.ForceCommit()
|
||||
|
||||
as.enabled = false
|
||||
|
||||
as.simpleTokensMu.Lock()
|
||||
tk := as.simpleTokenKeeper
|
||||
as.simpleTokenKeeper = nil
|
||||
as.simpleTokens = make(map[string]string) // invalidate all tokens
|
||||
as.simpleTokensMu.Unlock()
|
||||
if tk != nil {
|
||||
tk.stop()
|
||||
}
|
||||
as.tokenProvider.disable()
|
||||
|
||||
plog.Noticef("Authentication disabled")
|
||||
}
|
||||
@ -260,10 +251,7 @@ func (as *authStore) Close() error {
|
||||
if !as.enabled {
|
||||
return nil
|
||||
}
|
||||
if as.simpleTokenKeeper != nil {
|
||||
as.simpleTokenKeeper.stop()
|
||||
as.simpleTokenKeeper = nil
|
||||
}
|
||||
as.tokenProvider.disable()
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -272,10 +260,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
|
||||
return nil, ErrAuthNotEnabled
|
||||
}
|
||||
|
||||
// TODO(mitake): after adding jwt support, branching based on values of ctx is required
|
||||
index := ctx.Value("index").(uint64)
|
||||
simpleToken := ctx.Value("simpleToken").(string)
|
||||
|
||||
tx := as.be.BatchTx()
|
||||
tx.Lock()
|
||||
defer tx.Unlock()
|
||||
@ -285,14 +269,23 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
|
||||
return nil, ErrAuthFailed
|
||||
}
|
||||
|
||||
token := fmt.Sprintf("%s.%d", simpleToken, index)
|
||||
as.assignSimpleTokenToUser(username, token)
|
||||
// Password checking is already performed in the API layer, so we don't need to check for now.
|
||||
// Staleness of password can be detected with OCC in the API layer, too.
|
||||
|
||||
plog.Infof("authorized %s, token is %s", username, token)
|
||||
token, err := as.tokenProvider.assign(ctx, username, as.Revision())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
plog.Debugf("authorized %s, token is %s", username, token)
|
||||
return &pb.AuthenticateResponse{Token: token}, nil
|
||||
}
|
||||
|
||||
func (as *authStore) CheckPassword(username, password string) (uint64, error) {
|
||||
if !as.isAuthEnabled() {
|
||||
return 0, ErrAuthNotEnabled
|
||||
}
|
||||
|
||||
tx := as.be.BatchTx()
|
||||
tx.Lock()
|
||||
defer tx.Unlock()
|
||||
@ -322,7 +315,7 @@ func (as *authStore) Recover(be backend.Backend) {
|
||||
}
|
||||
}
|
||||
|
||||
as.revision = getRevision(tx)
|
||||
as.setRevision(getRevision(tx))
|
||||
|
||||
tx.Unlock()
|
||||
|
||||
@ -366,6 +359,11 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
|
||||
}
|
||||
|
||||
func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
|
||||
if as.enabled && strings.Compare(r.Name, rootUser) == 0 {
|
||||
plog.Errorf("the user root must not be deleted")
|
||||
return nil, ErrInvalidAuthMgmt
|
||||
}
|
||||
|
||||
tx := as.be.BatchTx()
|
||||
tx.Lock()
|
||||
defer tx.Unlock()
|
||||
@ -380,7 +378,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
|
||||
as.commitRevision(tx)
|
||||
|
||||
as.invalidateCachedPerm(r.Name)
|
||||
as.invalidateUser(r.Name)
|
||||
as.tokenProvider.invalidateUser(r.Name)
|
||||
|
||||
plog.Noticef("deleted a user: %s", r.Name)
|
||||
|
||||
@ -416,7 +414,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
|
||||
as.commitRevision(tx)
|
||||
|
||||
as.invalidateCachedPerm(r.Name)
|
||||
as.invalidateUser(r.Name)
|
||||
as.tokenProvider.invalidateUser(r.Name)
|
||||
|
||||
plog.Noticef("changed a password of a user: %s", r.Name)
|
||||
|
||||
@ -491,6 +489,11 @@ func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListRespon
|
||||
}
|
||||
|
||||
func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
|
||||
if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 {
|
||||
plog.Errorf("the role root must not be revoked from the user root")
|
||||
return nil, ErrInvalidAuthMgmt
|
||||
}
|
||||
|
||||
tx := as.be.BatchTx()
|
||||
tx.Lock()
|
||||
defer tx.Unlock()
|
||||
@ -593,17 +596,10 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
|
||||
}
|
||||
|
||||
func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
|
||||
// TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles
|
||||
//
|
||||
// Assume a case like below:
|
||||
// create a role r1
|
||||
// create a user u1 and grant r1 to u1
|
||||
// delete r1
|
||||
//
|
||||
// After this sequence, u1 is still granted the role r1. So if admin create a new role with the name r1,
|
||||
// the new r1 is automatically granted u1.
|
||||
// In some cases, it would be confusing. So we need to provide an option for deleting the grant relation
|
||||
// from all users.
|
||||
if as.enabled && strings.Compare(r.Role, rootRole) == 0 {
|
||||
plog.Errorf("the role root must not be deleted")
|
||||
return nil, ErrInvalidAuthMgmt
|
||||
}
|
||||
|
||||
tx := as.be.BatchTx()
|
||||
tx.Lock()
|
||||
@ -616,6 +612,28 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
|
||||
|
||||
delRole(tx, r.Role)
|
||||
|
||||
users := getAllUsers(tx)
|
||||
for _, user := range users {
|
||||
updatedUser := &authpb.User{
|
||||
Name: user.Name,
|
||||
Password: user.Password,
|
||||
}
|
||||
|
||||
for _, role := range user.Roles {
|
||||
if strings.Compare(role, r.Role) != 0 {
|
||||
updatedUser.Roles = append(updatedUser.Roles, role)
|
||||
}
|
||||
}
|
||||
|
||||
if len(updatedUser.Roles) == len(user.Roles) {
|
||||
continue
|
||||
}
|
||||
|
||||
putUser(tx, updatedUser)
|
||||
|
||||
as.invalidateCachedPerm(string(user.Name))
|
||||
}
|
||||
|
||||
as.commitRevision(tx)
|
||||
|
||||
plog.Noticef("deleted role %s", r.Role)
|
||||
@ -645,15 +663,8 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
|
||||
return &pb.AuthRoleAddResponse{}, nil
|
||||
}
|
||||
|
||||
func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
|
||||
// same as '(t *tokenSimple) info' in v3.2+
|
||||
as.simpleTokensMu.Lock()
|
||||
username, ok := as.simpleTokens[token]
|
||||
if ok && as.simpleTokenKeeper != nil {
|
||||
as.simpleTokenKeeper.resetSimpleToken(token)
|
||||
}
|
||||
as.simpleTokensMu.Unlock()
|
||||
return &AuthInfo{Username: username, Revision: as.revision}, ok
|
||||
func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) {
|
||||
return as.tokenProvider.info(ctx, token, as.Revision())
|
||||
}
|
||||
|
||||
type permSlice []*authpb.Permission
|
||||
@ -723,7 +734,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
|
||||
return ErrUserEmpty
|
||||
}
|
||||
|
||||
if revision < as.revision {
|
||||
if revision < as.Revision() {
|
||||
return ErrAuthOldRevision
|
||||
}
|
||||
|
||||
@ -886,7 +897,7 @@ func (as *authStore) isAuthEnabled() bool {
|
||||
return as.enabled
|
||||
}
|
||||
|
||||
func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore {
|
||||
func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore {
|
||||
tx := be.BatchTx()
|
||||
tx.Lock()
|
||||
|
||||
@ -904,18 +915,17 @@ func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{})
|
||||
|
||||
as := &authStore{
|
||||
be: be,
|
||||
simpleTokens: make(map[string]string),
|
||||
revision: getRevision(tx),
|
||||
indexWaiter: indexWaiter,
|
||||
enabled: enabled,
|
||||
rangePermCache: make(map[string]*unifiedRangePermissions),
|
||||
tokenProvider: tp,
|
||||
}
|
||||
|
||||
if enabled {
|
||||
as.enable()
|
||||
as.tokenProvider.enable()
|
||||
}
|
||||
|
||||
if as.revision == 0 {
|
||||
if as.Revision() == 0 {
|
||||
as.commitRevision(tx)
|
||||
}
|
||||
|
||||
@ -935,9 +945,9 @@ func hasRootRole(u *authpb.User) bool {
|
||||
}
|
||||
|
||||
func (as *authStore) commitRevision(tx backend.BatchTx) {
|
||||
as.revision++
|
||||
atomic.AddUint64(&as.revision, 1)
|
||||
revBytes := make([]byte, revBytesLen)
|
||||
binary.BigEndian.PutUint64(revBytes, as.revision)
|
||||
binary.BigEndian.PutUint64(revBytes, as.Revision())
|
||||
tx.UnsafePut(authBucketName, revisionKey, revBytes)
|
||||
}
|
||||
|
||||
@ -951,31 +961,38 @@ func getRevision(tx backend.BatchTx) uint64 {
|
||||
return binary.BigEndian.Uint64(vs[0])
|
||||
}
|
||||
|
||||
func (as *authStore) Revision() uint64 {
|
||||
return as.revision
|
||||
func (as *authStore) setRevision(rev uint64) {
|
||||
atomic.StoreUint64(&as.revision, rev)
|
||||
}
|
||||
|
||||
func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool {
|
||||
splitted := strings.Split(token, ".")
|
||||
if len(splitted) != 2 {
|
||||
return false
|
||||
}
|
||||
index, err := strconv.Atoi(splitted[1])
|
||||
if err != nil {
|
||||
return false
|
||||
func (as *authStore) Revision() uint64 {
|
||||
return atomic.LoadUint64(&as.revision)
|
||||
}
|
||||
|
||||
func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
|
||||
peer, ok := peer.FromContext(ctx)
|
||||
if !ok || peer == nil || peer.AuthInfo == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-as.indexWaiter(uint64(index)):
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
|
||||
for _, chains := range tlsInfo.State.VerifiedChains {
|
||||
for _, chain := range chains {
|
||||
cn := chain.Subject.CommonName
|
||||
plog.Debugf("found common name %s", cn)
|
||||
|
||||
return &AuthInfo{
|
||||
Username: cn,
|
||||
Revision: as.Revision(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
|
||||
func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
|
||||
md, ok := metadata.FromContext(ctx)
|
||||
md, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
@ -986,14 +1003,57 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
|
||||
}
|
||||
|
||||
token := ts[0]
|
||||
if !as.isValidSimpleToken(token, ctx) {
|
||||
return nil, ErrInvalidAuthToken
|
||||
}
|
||||
|
||||
authInfo, uok := as.AuthInfoFromToken(token)
|
||||
authInfo, uok := as.authInfoFromToken(ctx, token)
|
||||
if !uok {
|
||||
plog.Warningf("invalid auth token: %s", token)
|
||||
return nil, ErrInvalidAuthToken
|
||||
}
|
||||
return authInfo, nil
|
||||
}
|
||||
|
||||
func (as *authStore) GenTokenPrefix() (string, error) {
|
||||
return as.tokenProvider.genTokenPrefix()
|
||||
}
|
||||
|
||||
func decomposeOpts(optstr string) (string, map[string]string, error) {
|
||||
opts := strings.Split(optstr, ",")
|
||||
tokenType := opts[0]
|
||||
|
||||
typeSpecificOpts := make(map[string]string)
|
||||
for i := 1; i < len(opts); i++ {
|
||||
pair := strings.Split(opts[i], "=")
|
||||
|
||||
if len(pair) != 2 {
|
||||
plog.Errorf("invalid token specific option: %s", optstr)
|
||||
return "", nil, ErrInvalidAuthOpts
|
||||
}
|
||||
|
||||
if _, ok := typeSpecificOpts[pair[0]]; ok {
|
||||
plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr)
|
||||
return "", nil, ErrInvalidAuthOpts
|
||||
}
|
||||
|
||||
typeSpecificOpts[pair[0]] = pair[1]
|
||||
}
|
||||
|
||||
return tokenType, typeSpecificOpts, nil
|
||||
|
||||
}
|
||||
|
||||
func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) {
|
||||
tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidAuthOpts
|
||||
}
|
||||
|
||||
switch tokenType {
|
||||
case "simple":
|
||||
plog.Warningf("simple token is not cryptographically signed")
|
||||
return newTokenProviderSimple(indexWaiter), nil
|
||||
case "jwt":
|
||||
return newTokenProviderJWT(typeSpecificOpts)
|
||||
default:
|
||||
plog.Errorf("unknown token type: %s", tokenType)
|
||||
return nil, ErrInvalidAuthOpts
|
||||
}
|
||||
}
|
||||
|
3 vendor/github.com/coreos/etcd/client/BUILD (generated, vendored)
@@ -14,14 +14,15 @@ go_library(
"keys.generated.go",
"keys.go",
"members.go",
"srv.go",
"util.go",
],
importpath = "github.com/coreos/etcd/client",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/coreos/etcd/pkg/pathutil:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/srv:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/types:go_default_library",
"//vendor/github.com/coreos/etcd/version:go_default_library",
"//vendor/github.com/ugorji/go/codec:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
],
52 vendor/github.com/coreos/etcd/client/client.go (generated, vendored)
@ -15,6 +15,7 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@ -27,6 +28,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/version"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -201,6 +204,9 @@ type Client interface {
|
||||
// returned
|
||||
SetEndpoints(eps []string) error
|
||||
|
||||
// GetVersion retrieves the current etcd server and cluster version
|
||||
GetVersion(ctx context.Context) (*version.Versions, error)
|
||||
|
||||
httpClient
|
||||
}
|
||||
|
||||
@ -366,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
||||
if err == context.Canceled || err == context.DeadlineExceeded {
|
||||
return nil, nil, err
|
||||
}
|
||||
if isOneShot {
|
||||
return nil, nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if resp.StatusCode/100 == 5 {
|
||||
} else if resp.StatusCode/100 == 5 {
|
||||
switch resp.StatusCode {
|
||||
case http.StatusInternalServerError, http.StatusServiceUnavailable:
|
||||
// TODO: make sure this is a no leader response
|
||||
@ -379,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
|
||||
default:
|
||||
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
|
||||
}
|
||||
if isOneShot {
|
||||
return nil, nil, cerr.Errors[0]
|
||||
err = cerr.Errors[0]
|
||||
}
|
||||
if err != nil {
|
||||
if !isOneShot {
|
||||
continue
|
||||
}
|
||||
continue
|
||||
c.Lock()
|
||||
c.pinned = (k + 1) % leps
|
||||
c.Unlock()
|
||||
return nil, nil, err
|
||||
}
|
||||
if k != pinned {
|
||||
c.Lock()
|
||||
@ -477,6 +484,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration
|
||||
}
|
||||
}
|
||||
|
||||
func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
|
||||
act := &getAction{Prefix: "/version"}
|
||||
|
||||
resp, body, err := c.Do(ctx, act)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
if len(body) == 0 {
|
||||
return nil, ErrEmptyBody
|
||||
}
|
||||
var vresp version.Versions
|
||||
if err := json.Unmarshal(body, &vresp); err != nil {
|
||||
return nil, ErrInvalidJSON
|
||||
}
|
||||
return &vresp, nil
|
||||
default:
|
||||
var etcdErr Error
|
||||
if err := json.Unmarshal(body, &etcdErr); err != nil {
|
||||
return nil, ErrInvalidJSON
|
||||
}
|
||||
return nil, etcdErr
|
||||
}
|
||||
}
|
||||
|
||||
type roundTripResponse struct {
|
||||
resp *http.Response
|
||||
err error
|
||||
|
19 vendor/github.com/coreos/etcd/client/discover.go (generated, vendored)
@@ -14,8 +14,27 @@

package client

import (
"github.com/coreos/etcd/pkg/srv"
)

// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
// Discover looks up the etcd servers for the domain.
Discover(domain string) ([]string, error)
}

type srvDiscover struct{}

// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}

func (d *srvDiscover) Discover(domain string) ([]string, error) {
srvs, err := srv.GetClient("etcd-client", domain)
if err != nil {
return nil, err
}
return srvs.Endpoints, nil
}
65 vendor/github.com/coreos/etcd/client/srv.go (generated, vendored)
@@ -1,65 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
"fmt"
"net"
"net/url"
)

var (
// indirection for testing
lookupSRV = net.LookupSRV
)

type srvDiscover struct{}

// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}

// Discover looks up the etcd servers for the domain.
func (d *srvDiscover) Discover(domain string) ([]string, error) {
var urls []*url.URL

updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
if err != nil {
return err
}
for _, srv := range addrs {
urls = append(urls, &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
})
}
return nil
}

errHTTPS := updateURLs("etcd-client-ssl", "https")
errHTTP := updateURLs("etcd-client", "http")

if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}

endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return endpoints, nil
}
16 vendor/github.com/coreos/etcd/clientv3/BUILD (generated, vendored)
@@ -4,18 +4,19 @@ go_library(
name = "go_default_library",
srcs = [
"auth.go",
"balancer.go",
"client.go",
"cluster.go",
"compact_op.go",
"compare.go",
"config.go",
"doc.go",
"health_balancer.go",
"kv.go",
"lease.go",
"logger.go",
"maintenance.go",
"op.go",
"ready_wait.go",
"retry.go",
"sort.go",
"txn.go",
@@ -28,15 +29,15 @@ go_library(
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
"//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
"//vendor/google.golang.org/grpc/codes:go_default_library",
"//vendor/google.golang.org/grpc/credentials:go_default_library",
"//vendor/google.golang.org/grpc/grpclog:go_default_library",
"//vendor/google.golang.org/grpc/health/grpc_health_v1:go_default_library",
"//vendor/google.golang.org/grpc/keepalive:go_default_library",
"//vendor/google.golang.org/grpc/metadata:go_default_library",
"//vendor/google.golang.org/grpc/status:go_default_library",
],
)

@@ -49,7 +50,12 @@ filegroup(

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//vendor/github.com/coreos/etcd/clientv3/concurrency:all-srcs",
"//vendor/github.com/coreos/etcd/clientv3/namespace:all-srcs",
"//vendor/github.com/coreos/etcd/clientv3/naming:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
10 vendor/github.com/coreos/etcd/clientv3/README.md (generated, vendored)
@@ -1,6 +1,6 @@
# etcd/clientv3

[](https://godoc.org/github.com/coreos/etcd/clientv3)
[](https://godoc.org/github.com/coreos/etcd/clientv3)

`etcd/clientv3` is the official Go etcd client for v3.

@@ -32,7 +32,7 @@ pass `context.WithTimeout` to APIs:

```go
ctx, cancel := context.WithTimeout(context.Background(), timeout)
resp, err := kvc.Put(ctx, "sample_key", "sample_value")
resp, err := cli.Put(ctx, "sample_key", "sample_value")
cancel()
if err != nil {
// handle error!
@@ -57,7 +57,7 @@ etcd client returns 2 types of errors:

Here is the example code to handle client errors:

```go
resp, err := kvc.Put(ctx, "", "")
resp, err := cli.Put(ctx, "", "")
if err != nil {
switch err {
case context.Canceled:
@@ -76,6 +76,10 @@ if err != nil {

The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).

## Namespacing

The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.

## Examples

More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
25 vendor/github.com/coreos/etcd/clientv3/auth.go (generated, vendored)
@ -20,6 +20,7 @@ import (
|
||||
|
||||
"github.com/coreos/etcd/auth/authpb"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
@ -100,28 +101,20 @@ type Auth interface {
|
||||
}
|
||||
|
||||
type auth struct {
|
||||
c *Client
|
||||
|
||||
conn *grpc.ClientConn // conn in-use
|
||||
remote pb.AuthClient
|
||||
}
|
||||
|
||||
func NewAuth(c *Client) Auth {
|
||||
conn := c.ActiveConnection()
|
||||
return &auth{
|
||||
conn: c.ActiveConnection(),
|
||||
remote: pb.NewAuthClient(conn),
|
||||
c: c,
|
||||
}
|
||||
return &auth{remote: RetryAuthClient(c)}
|
||||
}
|
||||
|
||||
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
|
||||
return (*AuthEnableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
|
||||
return (*AuthDisableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
@ -146,12 +139,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (
|
||||
}
|
||||
|
||||
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
|
||||
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name})
|
||||
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
|
||||
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{})
|
||||
return (*AuthUserListResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
@ -176,12 +169,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
|
||||
}
|
||||
|
||||
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
|
||||
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role})
|
||||
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
|
||||
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{})
|
||||
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
@ -209,7 +202,7 @@ type authenticator struct {
|
||||
}
|
||||
|
||||
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
|
||||
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password})
|
||||
return (*AuthenticateResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
|
239 vendor/github.com/coreos/etcd/clientv3/balancer.go (generated, vendored)
@ -1,239 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
|
||||
// any active connection to endpoints at the time.
|
||||
// This error is returned only when opts.BlockingWait is true.
|
||||
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
|
||||
|
||||
// simpleBalancer does the bare minimum to expose multiple eps
|
||||
// to the grpc reconnection code path
|
||||
type simpleBalancer struct {
|
||||
// addrs are the client's endpoints for grpc
|
||||
addrs []grpc.Address
|
||||
// notifyCh notifies grpc of the set of addresses for connecting
|
||||
notifyCh chan []grpc.Address
|
||||
|
||||
// readyc closes once the first connection is up
|
||||
readyc chan struct{}
|
||||
readyOnce sync.Once
|
||||
|
||||
// mu protects upEps, pinAddr, and connectingAddr
|
||||
mu sync.RWMutex
|
||||
// upEps holds the current endpoints that have an active connection
|
||||
upEps map[string]struct{}
|
||||
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
|
||||
upc chan struct{}
|
||||
|
||||
// grpc issues TLS cert checks using the string passed into dial so
|
||||
// that string must be the host. To recover the full scheme://host URL,
|
||||
// have a map from hosts to the original endpoint.
|
||||
host2ep map[string]string
|
||||
|
||||
// pinAddr is the currently pinned address; set to the empty string on
|
||||
// intialization and shutdown.
|
||||
pinAddr string
|
||||
|
||||
closed bool
|
||||
}
|
||||
|
||||
func newSimpleBalancer(eps []string) *simpleBalancer {
|
||||
notifyCh := make(chan []grpc.Address, 1)
|
||||
addrs := make([]grpc.Address, len(eps))
|
||||
for i := range eps {
|
||||
addrs[i].Addr = getHost(eps[i])
|
||||
}
|
||||
notifyCh <- addrs
|
||||
sb := &simpleBalancer{
|
||||
addrs: addrs,
|
||||
notifyCh: notifyCh,
|
||||
readyc: make(chan struct{}),
|
||||
upEps: make(map[string]struct{}),
|
||||
upc: make(chan struct{}),
|
||||
host2ep: getHost2ep(eps),
|
||||
}
|
||||
return sb
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
|
||||
|
||||
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.upc
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) getEndpoint(host string) string {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.host2ep[host]
|
||||
}
|
||||
|
||||
func getHost2ep(eps []string) map[string]string {
|
||||
hm := make(map[string]string, len(eps))
|
||||
for i := range eps {
|
||||
_, host, _ := parseEndpoint(eps[i])
|
||||
hm[host] = eps[i]
|
||||
}
|
||||
return hm
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) updateAddrs(eps []string) {
|
||||
np := getHost2ep(eps)
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
match := len(np) == len(b.host2ep)
|
||||
for k, v := range np {
|
||||
if b.host2ep[k] != v {
|
||||
match = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if match {
|
||||
// same endpoints, so no need to update address
|
||||
return
|
||||
}
|
||||
|
||||
b.host2ep = np
|
||||
|
||||
addrs := make([]grpc.Address, 0, len(eps))
|
||||
for i := range eps {
|
||||
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
|
||||
}
|
||||
b.addrs = addrs
|
||||
b.notifyCh <- addrs
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
// gRPC might call Up after it called Close. We add this check
|
||||
// to "fix" it up at application layer. Or our simplerBalancer
|
||||
// might panic since b.upc is closed.
|
||||
if b.closed {
|
||||
return func(err error) {}
|
||||
}
|
||||
|
||||
if len(b.upEps) == 0 {
|
||||
// notify waiting Get()s and pin first connected address
|
||||
close(b.upc)
|
||||
b.pinAddr = addr.Addr
|
||||
}
|
||||
b.upEps[addr.Addr] = struct{}{}
|
||||
|
||||
// notify client that a connection is up
|
||||
b.readyOnce.Do(func() { close(b.readyc) })
|
||||
|
||||
return func(err error) {
|
||||
b.mu.Lock()
|
||||
delete(b.upEps, addr.Addr)
|
||||
if len(b.upEps) == 0 && b.pinAddr != "" {
|
||||
b.upc = make(chan struct{})
|
||||
} else if b.pinAddr == addr.Addr {
|
||||
// choose new random up endpoint
|
||||
for k := range b.upEps {
|
||||
b.pinAddr = k
|
||||
break
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
||||
var addr string
|
||||
|
||||
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
|
||||
// an address it has notified via Notify immediately instead of blocking.
|
||||
if !opts.BlockingWait {
|
||||
b.mu.RLock()
|
||||
closed := b.closed
|
||||
addr = b.pinAddr
|
||||
upEps := len(b.upEps)
|
||||
b.mu.RUnlock()
|
||||
if closed {
|
||||
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||
}
|
||||
|
||||
if upEps == 0 {
|
||||
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
|
||||
}
|
||||
return grpc.Address{Addr: addr}, func() {}, nil
|
||||
}
|
||||
|
||||
for {
|
||||
b.mu.RLock()
|
||||
ch := b.upc
|
||||
b.mu.RUnlock()
|
||||
select {
|
||||
case <-ch:
|
||||
case <-ctx.Done():
|
||||
return grpc.Address{Addr: ""}, nil, ctx.Err()
|
||||
}
|
||||
b.mu.RLock()
|
||||
addr = b.pinAddr
|
||||
upEps := len(b.upEps)
|
||||
b.mu.RUnlock()
|
||||
if addr == "" {
|
||||
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||
}
|
||||
if upEps > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return grpc.Address{Addr: addr}, func() {}, nil
|
||||
}
|
||||
|
||||
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
|
||||
|
||||
func (b *simpleBalancer) Close() error {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
// In case gRPC calls close twice. TODO: remove the checking
|
||||
// when we are sure that gRPC won't call close twice.
|
||||
if b.closed {
|
||||
return nil
|
||||
}
|
||||
b.closed = true
|
||||
close(b.notifyCh)
|
||||
// terminate all waiting Get()s
|
||||
b.pinAddr = ""
|
||||
if len(b.upEps) == 0 {
|
||||
close(b.upc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getHost(ep string) string {
|
||||
url, uerr := url.Parse(ep)
|
||||
if uerr != nil || !strings.Contains(ep, "://") {
|
||||
return ep
|
||||
}
|
||||
return url.Host
|
||||
}
|
226
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
@ -20,22 +20,25 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
|
||||
ErrOldCluster = errors.New("etcdclient: old cluster version")
|
||||
)
|
||||
|
||||
// Client provides and manages an etcd v3 client session.
|
||||
@ -47,19 +50,20 @@ type Client struct {
|
||||
Auth
|
||||
Maintenance
|
||||
|
||||
conn *grpc.ClientConn
|
||||
cfg Config
|
||||
creds *credentials.TransportCredentials
|
||||
balancer *simpleBalancer
|
||||
retryWrapper retryRpcFunc
|
||||
retryAuthWrapper retryRpcFunc
|
||||
conn *grpc.ClientConn
|
||||
dialerrc chan error
|
||||
|
||||
cfg Config
|
||||
creds *credentials.TransportCredentials
|
||||
balancer *healthBalancer
|
||||
mu sync.Mutex
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// Username is a username for authentication
|
||||
// Username is a user name for authentication.
|
||||
Username string
|
||||
// Password is a password for authentication
|
||||
// Password is a password for authentication.
|
||||
Password string
|
||||
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
||||
tokenCred *authTokenCredential
|
||||
@ -74,26 +78,28 @@ func New(cfg Config) (*Client, error) {
|
||||
return newClient(&cfg)
|
||||
}
|
||||
|
||||
// NewCtxClient creates a client with a context but no underlying grpc
|
||||
// connection. This is useful for embedded cases that override the
|
||||
// service interface implementations and do not need connection management.
|
||||
func NewCtxClient(ctx context.Context) *Client {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
return &Client{ctx: cctx, cancel: cancel}
|
||||
}
|
||||
|
||||
// NewFromURL creates a new etcdv3 client from a URL.
|
||||
func NewFromURL(url string) (*Client, error) {
|
||||
return New(Config{Endpoints: []string{url}})
|
||||
}
|
||||
|
||||
// NewFromConfigFile creates a new etcdv3 client from a configuration file.
|
||||
func NewFromConfigFile(path string) (*Client, error) {
|
||||
cfg, err := configFromFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return New(*cfg)
|
||||
}
|
||||
|
||||
// Close shuts down the client's etcd connections.
|
||||
func (c *Client) Close() error {
|
||||
c.cancel()
|
||||
c.Watcher.Close()
|
||||
c.Lease.Close()
|
||||
return toErr(c.ctx, c.conn.Close())
|
||||
if c.conn != nil {
|
||||
return toErr(c.ctx, c.conn.Close())
|
||||
}
|
||||
return c.ctx.Err()
|
||||
}
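For reviewers tracing the new Close behavior, a minimal sketch of the typical client lifecycle against this vendored package; the endpoint below is a placeholder, not taken from this change.
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// DialTimeout bounds the initial connection attempt; without it New can
	// block until an endpoint becomes reachable.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Close cancels the client context, stops the watcher and lease streams,
	// and, when a grpc connection was established, closes it as shown above.
	defer cli.Close()
}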
|
||||
|
||||
// Ctx is a context for "out of band" messages (e.g., for sending
|
||||
@ -111,8 +117,23 @@ func (c *Client) Endpoints() (eps []string) {
|
||||
|
||||
// SetEndpoints updates client's endpoints.
|
||||
func (c *Client) SetEndpoints(eps ...string) {
|
||||
c.mu.Lock()
|
||||
c.cfg.Endpoints = eps
|
||||
c.balancer.updateAddrs(eps)
|
||||
c.mu.Unlock()
|
||||
c.balancer.updateAddrs(eps...)
|
||||
|
||||
// updating notifyCh can trigger new connections,
|
||||
// we need to update addrs if all connections are down
|
||||
// or addrs does not include pinAddr.
|
||||
c.balancer.mu.RLock()
|
||||
update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
|
||||
c.balancer.mu.RUnlock()
|
||||
if update {
|
||||
select {
|
||||
case c.balancer.updateAddrsC <- notifyNext:
|
||||
case <-c.balancer.stopc:
|
||||
}
|
||||
}
|
||||
}
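A hedged sketch of how SetEndpoints and Sync are commonly combined by callers; the addresses are placeholders, and the usual clientv3, time, and golang.org/x/net/context imports are assumed.
// Replace the static endpoint list, then let the cluster membership
// override it with the advertised client URLs.
func refreshEndpoints(ctx context.Context, cli *clientv3.Client) error {
	cli.SetEndpoints("http://10.0.0.1:2379", "http://10.0.0.2:2379")
	cctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	return cli.Sync(cctx)
}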
|
||||
|
||||
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
||||
@ -139,8 +160,10 @@ func (c *Client) autoSync() {
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
case <-time.After(c.cfg.AutoSyncInterval):
|
||||
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
|
||||
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
|
||||
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
|
||||
err := c.Sync(ctx)
|
||||
cancel()
|
||||
if err != nil && err != c.ctx.Err() {
|
||||
logger.Println("Auto sync endpoints failed:", err)
|
||||
}
|
||||
}
|
||||
@ -169,7 +192,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
|
||||
host = endpoint
|
||||
url, uerr := url.Parse(endpoint)
|
||||
if uerr != nil || !strings.Contains(endpoint, "://") {
|
||||
return
|
||||
return proto, host, scheme
|
||||
}
|
||||
scheme = url.Scheme
|
||||
|
||||
@ -177,12 +200,13 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
|
||||
host = url.Host
|
||||
switch url.Scheme {
|
||||
case "http", "https":
|
||||
case "unix":
|
||||
case "unix", "unixs":
|
||||
proto = "unix"
|
||||
host = url.Host + url.Path
|
||||
default:
|
||||
proto, host = "", ""
|
||||
}
|
||||
return
|
||||
return proto, host, scheme
|
||||
}
|
||||
|
||||
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
|
||||
@ -191,7 +215,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
|
||||
case "unix":
|
||||
case "http":
|
||||
creds = nil
|
||||
case "https":
|
||||
case "https", "unixs":
|
||||
if creds != nil {
|
||||
break
|
||||
}
|
||||
@ -201,7 +225,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
|
||||
default:
|
||||
creds = nil
|
||||
}
|
||||
return
|
||||
return creds
|
||||
}
|
||||
|
||||
// dialSetupOpts gives the dial opts prior to any authentication
|
||||
@ -209,10 +233,22 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
|
||||
}
|
||||
if c.cfg.DialKeepAliveTime > 0 {
|
||||
params := keepalive.ClientParameters{
|
||||
Time: c.cfg.DialKeepAliveTime,
|
||||
Timeout: c.cfg.DialKeepAliveTimeout,
|
||||
}
|
||||
opts = append(opts, grpc.WithKeepaliveParams(params))
|
||||
}
|
||||
opts = append(opts, dopts...)
|
||||
|
||||
f := func(host string, t time.Duration) (net.Conn, error) {
|
||||
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
|
||||
proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
|
||||
if host == "" && endpoint != "" {
|
||||
// dialing an endpoint not in the balancer; use
|
||||
// endpoint passed into dial
|
||||
proto, host, _ = parseEndpoint(endpoint)
|
||||
}
|
||||
if proto == "" {
|
||||
return nil, fmt.Errorf("unknown scheme for %q", host)
|
||||
}
|
||||
@ -222,7 +258,14 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
|
||||
default:
|
||||
}
|
||||
dialer := &net.Dialer{Timeout: t}
|
||||
return dialer.DialContext(c.ctx, proto, host)
|
||||
conn, err := dialer.DialContext(c.ctx, proto, host)
|
||||
if err != nil {
|
||||
select {
|
||||
case c.dialerrc <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
opts = append(opts, grpc.WithDialer(f))
|
||||
|
||||
@ -288,21 +331,23 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
if err := c.getToken(ctx); err != nil {
|
||||
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
|
||||
err = grpc.ErrClientConnTimeout
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
|
||||
err := c.getToken(ctx)
|
||||
if err != nil {
|
||||
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
|
||||
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
|
||||
err = context.DeadlineExceeded
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
|
||||
}
|
||||
}
|
||||
|
||||
// add metrics options
|
||||
opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor))
|
||||
opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor))
|
||||
opts = append(opts, c.cfg.DialOptions...)
|
||||
|
||||
conn, err := grpc.Dial(host, opts...)
|
||||
conn, err := grpc.DialContext(c.ctx, host, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -313,7 +358,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
|
||||
// when the cluster has a leader.
|
||||
func WithRequireLeader(ctx context.Context) context.Context {
|
||||
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
|
||||
return metadata.NewContext(ctx, md)
|
||||
return metadata.NewOutgoingContext(ctx, md)
|
||||
}
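A small sketch of how WithRequireLeader is typically used with this client; the key name is a placeholder.
// Requests carrying the require-leader metadata fail fast when the serving
// member has no leader, instead of waiting for quorum to return.
func getWithLeader(cli *clientv3.Client, key string) (*clientv3.GetResponse, error) {
	ctx := clientv3.WithRequireLeader(context.Background())
	return cli.Get(ctx, key)
}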
|
||||
|
||||
func newClient(cfg *Config) (*Client, error) {
|
||||
@ -327,20 +372,31 @@ func newClient(cfg *Config) (*Client, error) {
|
||||
}
|
||||
|
||||
// use a temporary skeleton client to bootstrap first connection
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
baseCtx := context.TODO()
|
||||
if cfg.Context != nil {
|
||||
baseCtx = cfg.Context
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(baseCtx)
|
||||
client := &Client{
|
||||
conn: nil,
|
||||
cfg: *cfg,
|
||||
creds: creds,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
conn: nil,
|
||||
dialerrc: make(chan error, 1),
|
||||
cfg: *cfg,
|
||||
creds: creds,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
if cfg.Username != "" && cfg.Password != "" {
|
||||
client.Username = cfg.Username
|
||||
client.Password = cfg.Password
|
||||
}
|
||||
|
||||
client.balancer = newSimpleBalancer(cfg.Endpoints)
|
||||
client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
|
||||
return grpcHealthCheck(client, ep)
|
||||
})
|
||||
|
||||
// use Endpoints[0] so that, for https:// endpoints without any tls config given,
|
||||
// grpc will assume the certificate server name is the endpoint host.
|
||||
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
|
||||
if err != nil {
|
||||
client.cancel()
|
||||
@ -348,24 +404,27 @@ func newClient(cfg *Config) (*Client, error) {
|
||||
return nil, err
|
||||
}
|
||||
client.conn = conn
|
||||
client.retryWrapper = client.newRetryWrapper()
|
||||
client.retryAuthWrapper = client.newAuthRetryWrapper()
|
||||
|
||||
// wait for a connection
|
||||
if cfg.DialTimeout > 0 {
|
||||
hasConn := false
|
||||
waitc := time.After(cfg.DialTimeout)
|
||||
select {
|
||||
case <-client.balancer.readyc:
|
||||
case <-client.balancer.ready():
|
||||
hasConn = true
|
||||
case <-ctx.Done():
|
||||
case <-waitc:
|
||||
}
|
||||
if !hasConn {
|
||||
err := context.DeadlineExceeded
|
||||
select {
|
||||
case err = <-client.dialerrc:
|
||||
default:
|
||||
}
|
||||
client.cancel()
|
||||
client.balancer.Close()
|
||||
conn.Close()
|
||||
return nil, grpc.ErrClientConnTimeout
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@ -376,10 +435,57 @@ func newClient(cfg *Config) (*Client, error) {
|
||||
client.Auth = NewAuth(client)
|
||||
client.Maintenance = NewMaintenance(client)
|
||||
|
||||
if cfg.RejectOldCluster {
|
||||
if err := client.checkVersion(); err != nil {
|
||||
client.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
go client.autoSync()
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (c *Client) checkVersion() (err error) {
|
||||
var wg sync.WaitGroup
|
||||
errc := make(chan error, len(c.cfg.Endpoints))
|
||||
ctx, cancel := context.WithCancel(c.ctx)
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||
}
|
||||
wg.Add(len(c.cfg.Endpoints))
|
||||
for _, ep := range c.cfg.Endpoints {
|
||||
// if cluster is current, any endpoint gives a recent version
|
||||
go func(e string) {
|
||||
defer wg.Done()
|
||||
resp, rerr := c.Status(ctx, e)
|
||||
if rerr != nil {
|
||||
errc <- rerr
|
||||
return
|
||||
}
|
||||
vs := strings.Split(resp.Version, ".")
|
||||
maj, min := 0, 0
|
||||
if len(vs) >= 2 {
|
||||
maj, _ = strconv.Atoi(vs[0])
|
||||
min, rerr = strconv.Atoi(vs[1])
|
||||
}
|
||||
if maj < 3 || (maj == 3 && min < 2) {
|
||||
rerr = ErrOldCluster
|
||||
}
|
||||
errc <- rerr
|
||||
}(ep)
|
||||
}
|
||||
// wait for success
|
||||
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||
if err = <-errc; err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
wg.Wait()
|
||||
return err
|
||||
}
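checkVersion above backs the new RejectOldCluster option; a hedged configuration sketch, with a placeholder endpoint.
cli, err := clientv3.New(clientv3.Config{
	Endpoints:        []string{"http://127.0.0.1:2379"},
	DialTimeout:      5 * time.Second,
	RejectOldCluster: true, // runs checkVersion against every configured endpoint
})
if err != nil {
	log.Fatal(err) // includes ErrOldCluster for pre-3.2 members
}
defer cli.Close()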
|
||||
|
||||
// ActiveConnection returns the current in-use connection
|
||||
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
|
||||
|
||||
@ -392,14 +498,14 @@ func isHaltErr(ctx context.Context, err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
code := grpc.Code(err)
|
||||
ev, _ := status.FromError(err)
|
||||
// Unavailable codes mean the system will be right back.
|
||||
// (e.g., can't connect, lost leader)
|
||||
// Treat Internal codes as if something failed, leaving the
|
||||
// system in an inconsistent state, but retrying could make progress.
|
||||
// (e.g., failed in middle of send, corrupted frame)
|
||||
// TODO: are permanent Internal errors possible from grpc?
|
||||
return code != codes.Unavailable && code != codes.Internal
|
||||
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
|
||||
}
|
||||
|
||||
func toErr(ctx context.Context, err error) error {
|
||||
@ -410,7 +516,8 @@ func toErr(ctx context.Context, err error) error {
|
||||
if _, ok := err.(rpctypes.EtcdError); ok {
|
||||
return err
|
||||
}
|
||||
code := grpc.Code(err)
|
||||
ev, _ := status.FromError(err)
|
||||
code := ev.Code()
|
||||
switch code {
|
||||
case codes.DeadlineExceeded:
|
||||
fallthrough
|
||||
@ -419,9 +526,16 @@ func toErr(ctx context.Context, err error) error {
|
||||
err = ctx.Err()
|
||||
}
|
||||
case codes.Unavailable:
|
||||
err = ErrNoAvailableEndpoints
|
||||
case codes.FailedPrecondition:
|
||||
err = grpc.ErrClientConnClosing
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func canceledByCaller(stopCtx context.Context, err error) bool {
|
||||
if stopCtx.Err() == nil || err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return err == context.Canceled || err == context.DeadlineExceeded
|
||||
}
|
||||
|
46
vendor/github.com/coreos/etcd/clientv3/cluster.go
generated
vendored
@ -16,8 +16,8 @@ package clientv3
|
||||
|
||||
import (
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -50,53 +50,43 @@ func NewCluster(c *Client) Cluster {
|
||||
return &cluster{remote: RetryClusterClient(c)}
|
||||
}
|
||||
|
||||
func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
|
||||
return &cluster{remote: remote}
|
||||
}
|
||||
|
||||
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
|
||||
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberAdd(ctx, r)
|
||||
if err == nil {
|
||||
return (*MemberAddResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
return (*MemberAddResponse)(resp), nil
|
||||
}
|
||||
|
||||
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
|
||||
r := &pb.MemberRemoveRequest{ID: id}
|
||||
resp, err := c.remote.MemberRemove(ctx, r)
|
||||
if err == nil {
|
||||
return (*MemberRemoveResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
return (*MemberRemoveResponse)(resp), nil
|
||||
}
|
||||
|
||||
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
|
||||
// it is safe to retry on update.
|
||||
for {
|
||||
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
return (*MemberUpdateResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberUpdate(ctx, r)
|
||||
if err == nil {
|
||||
return (*MemberUpdateResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
|
||||
// it is safe to retry on list.
|
||||
for {
|
||||
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
return (*MemberListResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{})
|
||||
if err == nil {
|
||||
return (*MemberListResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
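For context, a sketch of the Cluster API whose retry handling changes here; peerURL is a placeholder.
func addMember(ctx context.Context, cli *clientv3.Client, peerURL string) error {
	// MemberList is safe to retry; retries now live in RetryClusterClient
	// rather than inside these methods.
	if _, err := cli.MemberList(ctx); err != nil {
		return err
	}
	// MemberAdd is not blindly retried, so surface the error to the caller.
	_, err := cli.MemberAdd(ctx, []string{peerURL})
	return err
}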
|
||||
|
6
vendor/github.com/coreos/etcd/clientv3/compact_op.go
generated
vendored
@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
|
||||
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
|
||||
}
|
||||
|
||||
// WithCompactPhysical makes compact RPC call wait until
|
||||
// the compaction is physically applied to the local database
|
||||
// such that compacted entries are totally removed from the
|
||||
// backend database.
|
||||
// WithCompactPhysical makes Compact wait until all compacted entries are
|
||||
// removed from the etcd server's storage.
|
||||
func WithCompactPhysical() CompactOption {
|
||||
return func(op *CompactOp) { op.physical = true }
|
||||
}
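A brief usage sketch of the compaction option documented above.
// Compact discards key history up to rev; WithCompactPhysical additionally
// waits until compacted entries are removed from the backend.
func compactTo(ctx context.Context, cli *clientv3.Client, rev int64) error {
	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}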
|
||||
|
27
vendor/github.com/coreos/etcd/clientv3/compare.go
generated
vendored
@ -82,6 +82,24 @@ func ModRevision(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
|
||||
}
|
||||
|
||||
// KeyBytes returns the byte slice holding the comparison key.
|
||||
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
|
||||
|
||||
// WithKeyBytes sets the byte slice for the comparison key.
|
||||
func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
|
||||
|
||||
// ValueBytes returns the byte slice holding the comparison value, if any.
|
||||
func (cmp *Cmp) ValueBytes() []byte {
|
||||
if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
|
||||
return tu.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithValueBytes sets the byte slice for the comparison's value.
|
||||
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
|
||||
|
||||
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
|
||||
func mustInt64(val interface{}) int64 {
|
||||
if v, ok := val.(int64); ok {
|
||||
return v
|
||||
@ -91,3 +109,12 @@ func mustInt64(val interface{}) int64 {
|
||||
}
|
||||
panic("bad value")
|
||||
}
|
||||
|
||||
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
|
||||
// int64 otherwise.
|
||||
func mustInt64orLeaseID(val interface{}) int64 {
|
||||
if v, ok := val.(LeaseID); ok {
|
||||
return int64(v)
|
||||
}
|
||||
return mustInt64(val)
|
||||
}
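The comparison helpers above are normally consumed through transactions; a minimal sketch, with placeholder key and value names.
// putIfAbsent writes key only if it has never been created.
func putIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}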
|
||||
|
35
vendor/github.com/coreos/etcd/clientv3/concurrency/BUILD
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"election.go",
|
||||
"key.go",
|
||||
"mutex.go",
|
||||
"session.go",
|
||||
"stm.go",
|
||||
],
|
||||
importpath = "github.com/coreos/etcd/clientv3/concurrency",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
17
vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package concurrency implements concurrency operations on top of
|
||||
// etcd such as distributed locks, barriers, and elections.
|
||||
package concurrency
|
246
vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
generated
vendored
Normal file
@ -0,0 +1,246 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrElectionNotLeader = errors.New("election: not leader")
|
||||
ErrElectionNoLeader = errors.New("election: no leader")
|
||||
)
|
||||
|
||||
type Election struct {
|
||||
session *Session
|
||||
|
||||
keyPrefix string
|
||||
|
||||
leaderKey string
|
||||
leaderRev int64
|
||||
leaderSession *Session
|
||||
hdr *pb.ResponseHeader
|
||||
}
|
||||
|
||||
// NewElection returns a new election on a given key prefix.
|
||||
func NewElection(s *Session, pfx string) *Election {
|
||||
return &Election{session: s, keyPrefix: pfx + "/"}
|
||||
}
|
||||
|
||||
// ResumeElection initializes an election with a known leader.
|
||||
func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
|
||||
return &Election{
|
||||
session: s,
|
||||
leaderKey: leaderKey,
|
||||
leaderRev: leaderRev,
|
||||
leaderSession: s,
|
||||
}
|
||||
}
|
||||
|
||||
// Campaign puts a value as eligible for the election. It blocks until
|
||||
// it is elected, an error occurs, or the context is cancelled.
|
||||
func (e *Election) Campaign(ctx context.Context, val string) error {
|
||||
s := e.session
|
||||
client := e.session.Client()
|
||||
|
||||
k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
|
||||
txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
|
||||
txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
|
||||
txn = txn.Else(v3.OpGet(k))
|
||||
resp, err := txn.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
|
||||
if !resp.Succeeded {
|
||||
kv := resp.Responses[0].GetResponseRange().Kvs[0]
|
||||
e.leaderRev = kv.CreateRevision
|
||||
if string(kv.Value) != val {
|
||||
if err = e.Proclaim(ctx, val); err != nil {
|
||||
e.Resign(ctx)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
|
||||
if err != nil {
|
||||
// clean up in case of context cancel
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
e.Resign(client.Ctx())
|
||||
default:
|
||||
e.leaderSession = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
e.hdr = resp.Header
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Proclaim lets the leader announce a new value without another election.
|
||||
func (e *Election) Proclaim(ctx context.Context, val string) error {
|
||||
if e.leaderSession == nil {
|
||||
return ErrElectionNotLeader
|
||||
}
|
||||
client := e.session.Client()
|
||||
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
|
||||
txn := client.Txn(ctx).If(cmp)
|
||||
txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
|
||||
tresp, terr := txn.Commit()
|
||||
if terr != nil {
|
||||
return terr
|
||||
}
|
||||
if !tresp.Succeeded {
|
||||
e.leaderKey = ""
|
||||
return ErrElectionNotLeader
|
||||
}
|
||||
|
||||
e.hdr = tresp.Header
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resign lets a leader start a new election.
|
||||
func (e *Election) Resign(ctx context.Context) (err error) {
|
||||
if e.leaderSession == nil {
|
||||
return nil
|
||||
}
|
||||
client := e.session.Client()
|
||||
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
|
||||
resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
|
||||
if err == nil {
|
||||
e.hdr = resp.Header
|
||||
}
|
||||
e.leaderKey = ""
|
||||
e.leaderSession = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// Leader returns the leader value for the current election.
|
||||
func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
|
||||
client := e.session.Client()
|
||||
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if len(resp.Kvs) == 0 {
|
||||
// no leader currently elected
|
||||
return nil, ErrElectionNoLeader
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Observe returns a channel that reliably observes ordered leader proposals
|
||||
// as GetResponse values on every current elected leader key. It will not
|
||||
// necessarily fetch all historical leader updates, but will always post the
|
||||
// most recent leader value.
|
||||
//
|
||||
// The channel closes when the context is canceled or the underlying watcher
|
||||
// is otherwise disrupted.
|
||||
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
|
||||
retc := make(chan v3.GetResponse)
|
||||
go e.observe(ctx, retc)
|
||||
return retc
|
||||
}
|
||||
|
||||
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
|
||||
client := e.session.Client()
|
||||
|
||||
defer close(ch)
|
||||
for {
|
||||
resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var kv *mvccpb.KeyValue
|
||||
var hdr *pb.ResponseHeader
|
||||
|
||||
if len(resp.Kvs) == 0 {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
// wait for first key put on prefix
|
||||
opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
|
||||
wch := client.Watch(cctx, e.keyPrefix, opts...)
|
||||
for kv == nil {
|
||||
wr, ok := <-wch
|
||||
if !ok || wr.Err() != nil {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
// only accept puts; a delete will make observe() spin
|
||||
for _, ev := range wr.Events {
|
||||
if ev.Type == mvccpb.PUT {
|
||||
hdr, kv = &wr.Header, ev.Kv
|
||||
// may have multiple revs; hdr.rev = the last rev
|
||||
// set to kv's rev in case batch has multiple Puts
|
||||
hdr.Revision = kv.ModRevision
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
} else {
|
||||
hdr, kv = resp.Header, resp.Kvs[0]
|
||||
}
|
||||
|
||||
select {
|
||||
case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
|
||||
keyDeleted := false
|
||||
for !keyDeleted {
|
||||
wr, ok := <-wch
|
||||
if !ok {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
for _, ev := range wr.Events {
|
||||
if ev.Type == mvccpb.DELETE {
|
||||
keyDeleted = true
|
||||
break
|
||||
}
|
||||
resp.Header = &wr.Header
|
||||
resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
|
||||
select {
|
||||
case ch <- *resp:
|
||||
case <-cctx.Done():
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
// Key returns the leader key if elected, empty string otherwise.
|
||||
func (e *Election) Key() string { return e.leaderKey }
|
||||
|
||||
// Rev returns the leader key's creation revision, if elected.
|
||||
func (e *Election) Rev() int64 { return e.leaderRev }
|
||||
|
||||
// Header is the response header from the last successful election proposal.
|
||||
func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
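A hedged sketch of the election API added above, using the github.com/coreos/etcd/clientv3/concurrency package; the election prefix is a placeholder.
// Campaign blocks until this candidate is elected or ctx ends; the session's
// lease keeps the leader key alive while the process is healthy.
func runForLeader(ctx context.Context, cli *clientv3.Client, val string) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()
	e := concurrency.NewElection(s, "/my-election")
	return e.Campaign(ctx, val)
}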
|
66
vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
var wr v3.WatchResponse
|
||||
wch := client.Watch(cctx, key, v3.WithRev(rev))
|
||||
for wr = range wch {
|
||||
for _, ev := range wr.Events {
|
||||
if ev.Type == mvccpb.DELETE {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := wr.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("lost watcher waiting for delete")
|
||||
}
|
||||
|
||||
// waitDeletes efficiently waits until all keys matching the prefix and no greater
|
||||
// than the create revision are deleted.
|
||||
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
|
||||
getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
|
||||
for {
|
||||
resp, err := client.Get(ctx, pfx, getOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(resp.Kvs) == 0 {
|
||||
return resp.Header, nil
|
||||
}
|
||||
lastKey := string(resp.Kvs[0].Key)
|
||||
if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
119
vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Mutex implements the sync Locker interface with etcd
|
||||
type Mutex struct {
|
||||
s *Session
|
||||
|
||||
pfx string
|
||||
myKey string
|
||||
myRev int64
|
||||
hdr *pb.ResponseHeader
|
||||
}
|
||||
|
||||
func NewMutex(s *Session, pfx string) *Mutex {
|
||||
return &Mutex{s, pfx + "/", "", -1, nil}
|
||||
}
|
||||
|
||||
// Lock locks the mutex with a cancelable context. If the context is canceled
|
||||
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
|
||||
func (m *Mutex) Lock(ctx context.Context) error {
|
||||
s := m.s
|
||||
client := m.s.Client()
|
||||
|
||||
m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
|
||||
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
|
||||
// put self in lock waiters via myKey; oldest waiter holds lock
|
||||
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
|
||||
// reuse key in case this session already holds the lock
|
||||
get := v3.OpGet(m.myKey)
|
||||
// fetch current holder to complete uncontended path with only one RPC
|
||||
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
|
||||
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.myRev = resp.Header.Revision
|
||||
if !resp.Succeeded {
|
||||
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
|
||||
}
|
||||
// if there is no key on the prefix or the minimum rev is our key, we already hold the lock
|
||||
ownerKey := resp.Responses[1].GetResponseRange().Kvs
|
||||
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
|
||||
m.hdr = resp.Header
|
||||
return nil
|
||||
}
|
||||
|
||||
// wait for deletion revisions prior to myKey
|
||||
hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
|
||||
// release lock key if cancelled
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
m.Unlock(client.Ctx())
|
||||
default:
|
||||
m.hdr = hdr
|
||||
}
|
||||
return werr
|
||||
}
|
||||
|
||||
func (m *Mutex) Unlock(ctx context.Context) error {
|
||||
client := m.s.Client()
|
||||
if _, err := client.Delete(ctx, m.myKey); err != nil {
|
||||
return err
|
||||
}
|
||||
m.myKey = "\x00"
|
||||
m.myRev = -1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Mutex) IsOwner() v3.Cmp {
|
||||
return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
|
||||
}
|
||||
|
||||
func (m *Mutex) Key() string { return m.myKey }
|
||||
|
||||
// Header is the response header received from etcd on acquiring the lock.
|
||||
func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
|
||||
|
||||
type lockerMutex struct{ *Mutex }
|
||||
|
||||
func (lm *lockerMutex) Lock() {
|
||||
client := lm.s.Client()
|
||||
if err := lm.Mutex.Lock(client.Ctx()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
func (lm *lockerMutex) Unlock() {
|
||||
client := lm.s.Client()
|
||||
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// NewLocker creates a sync.Locker backed by an etcd mutex.
|
||||
func NewLocker(s *Session, pfx string) sync.Locker {
|
||||
return &lockerMutex{NewMutex(s, pfx)}
|
||||
}
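A minimal sketch of taking the distributed lock defined above; the lock prefix is a placeholder.
// The oldest waiter under the prefix holds the lock; Unlock deletes our key
// so the next waiter's waitDeletes returns.
func withLock(ctx context.Context, cli *clientv3.Client, fn func() error) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()
	m := concurrency.NewMutex(s, "/my-lock")
	if err := m.Lock(ctx); err != nil {
		return err
	}
	defer m.Unlock(ctx)
	return fn()
}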
|
142
vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const defaultSessionTTL = 60
|
||||
|
||||
// Session represents a lease kept alive for the lifetime of a client.
|
||||
// Fault-tolerant applications may use sessions to reason about liveness.
|
||||
type Session struct {
|
||||
client *v3.Client
|
||||
opts *sessionOptions
|
||||
id v3.LeaseID
|
||||
|
||||
cancel context.CancelFunc
|
||||
donec <-chan struct{}
|
||||
}
|
||||
|
||||
// NewSession gets the leased session for a client.
|
||||
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
|
||||
ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
|
||||
for _, opt := range opts {
|
||||
opt(ops)
|
||||
}
|
||||
|
||||
id := ops.leaseID
|
||||
if id == v3.NoLease {
|
||||
resp, err := client.Grant(ops.ctx, int64(ops.ttl))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id = v3.LeaseID(resp.ID)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ops.ctx)
|
||||
keepAlive, err := client.KeepAlive(ctx, id)
|
||||
if err != nil || keepAlive == nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
|
||||
|
||||
// keep the lease alive until client error or cancelled context
|
||||
go func() {
|
||||
defer close(donec)
|
||||
for range keepAlive {
|
||||
// eat messages until keep alive channel closes
|
||||
}
|
||||
}()
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Client is the etcd client that is attached to the session.
|
||||
func (s *Session) Client() *v3.Client {
|
||||
return s.client
|
||||
}
|
||||
|
||||
// Lease is the lease ID for keys bound to the session.
|
||||
func (s *Session) Lease() v3.LeaseID { return s.id }
|
||||
|
||||
// Done returns a channel that closes when the lease is orphaned, expires, or
|
||||
// is otherwise no longer being refreshed.
|
||||
func (s *Session) Done() <-chan struct{} { return s.donec }
|
||||
|
||||
// Orphan ends the refresh for the session lease. This is useful
|
||||
// in case the state of the client connection is indeterminate (revoke
|
||||
// would fail) or when transferring lease ownership.
|
||||
func (s *Session) Orphan() {
|
||||
s.cancel()
|
||||
<-s.donec
|
||||
}
|
||||
|
||||
// Close orphans the session and revokes the session lease.
|
||||
func (s *Session) Close() error {
|
||||
s.Orphan()
|
||||
// if revoke takes longer than the ttl, lease is expired anyway
|
||||
ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
|
||||
_, err := s.client.Revoke(ctx, s.id)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
type sessionOptions struct {
|
||||
ttl int
|
||||
leaseID v3.LeaseID
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// SessionOption configures Session.
|
||||
type SessionOption func(*sessionOptions)
|
||||
|
||||
// WithTTL configures the session's TTL in seconds.
|
||||
// If TTL is <= 0, the default 60 seconds TTL will be used.
|
||||
func WithTTL(ttl int) SessionOption {
|
||||
return func(so *sessionOptions) {
|
||||
if ttl > 0 {
|
||||
so.ttl = ttl
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithLease specifies the existing leaseID to be used for the session.
|
||||
// This is useful in a process restart scenario, for example, to reclaim
|
||||
// leadership from an election prior to restart.
|
||||
func WithLease(leaseID v3.LeaseID) SessionOption {
|
||||
return func(so *sessionOptions) {
|
||||
so.leaseID = leaseID
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext assigns a context to the session instead of defaulting to
|
||||
// using the client context. This is useful for canceling NewSession and
|
||||
// Close operations immediately without having to close the client. If the
|
||||
// context is canceled before Close() completes, the session's lease will be
|
||||
// abandoned and left to expire instead of being revoked.
|
||||
func WithContext(ctx context.Context) SessionOption {
|
||||
return func(so *sessionOptions) {
|
||||
so.ctx = ctx
|
||||
}
|
||||
}
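A short sketch of the session options above; the TTL value is illustrative only.
// A 10s TTL makes leader/lock keys expire quickly if this process dies, and
// WithContext lets NewSession and Close be cancelled without closing cli.
func newShortSession(ctx context.Context, cli *clientv3.Client) (*concurrency.Session, error) {
	return concurrency.NewSession(cli,
		concurrency.WithTTL(10),
		concurrency.WithContext(ctx),
	)
}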
|
388
vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
generated
vendored
Normal file
@ -0,0 +1,388 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// STM is an interface for software transactional memory.
|
||||
type STM interface {
|
||||
// Get returns the value for a key and inserts the key in the txn's read set.
|
||||
// If Get fails, it aborts the transaction with an error, never returning.
|
||||
Get(key ...string) string
|
||||
// Put adds a value for a key to the write set.
|
||||
Put(key, val string, opts ...v3.OpOption)
|
||||
// Rev returns the revision of a key in the read set.
|
||||
Rev(key string) int64
|
||||
// Del deletes a key.
|
||||
Del(key string)
|
||||
|
||||
// commit attempts to apply the txn's changes to the server.
|
||||
commit() *v3.TxnResponse
|
||||
reset()
|
||||
}
|
||||
|
||||
// Isolation is an enumeration of transactional isolation levels which
|
||||
// describes how transactions should interfere and conflict.
|
||||
type Isolation int
|
||||
|
||||
const (
|
||||
// SerializableSnapshot provides serializable isolation and also checks
|
||||
// for write conflicts.
|
||||
SerializableSnapshot Isolation = iota
|
||||
// Serializable reads within the same transaction attempt return data
|
||||
// from the revision of the first read.
|
||||
Serializable
|
||||
// RepeatableReads reads within the same transaction attempt always
|
||||
// return the same data.
|
||||
RepeatableReads
|
||||
// ReadCommitted reads keys from any committed revision.
|
||||
ReadCommitted
|
||||
)
|
||||
|
||||
// stmError safely passes STM errors through panic to the STM error channel.
|
||||
type stmError struct{ err error }
|
||||
|
||||
type stmOptions struct {
|
||||
iso Isolation
|
||||
ctx context.Context
|
||||
prefetch []string
|
||||
}
|
||||
|
||||
type stmOption func(*stmOptions)
|
||||
|
||||
// WithIsolation specifies the transaction isolation level.
|
||||
func WithIsolation(lvl Isolation) stmOption {
|
||||
return func(so *stmOptions) { so.iso = lvl }
|
||||
}
|
||||
|
||||
// WithAbortContext specifies the context for permanently aborting the transaction.
|
||||
func WithAbortContext(ctx context.Context) stmOption {
|
||||
return func(so *stmOptions) { so.ctx = ctx }
|
||||
}
|
||||
|
||||
// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
|
||||
// If an STM transaction will unconditionally fetch a set of keys, prefetching
|
||||
// those keys will save the round-trip cost from requesting each key one by one
|
||||
// with Get().
|
||||
func WithPrefetch(keys ...string) stmOption {
|
||||
return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
|
||||
}
|
||||
|
||||
// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
|
||||
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
|
||||
opts := &stmOptions{ctx: c.Ctx()}
|
||||
for _, f := range so {
|
||||
f(opts)
|
||||
}
|
||||
if len(opts.prefetch) != 0 {
|
||||
f := apply
|
||||
apply = func(s STM) error {
|
||||
s.Get(opts.prefetch...)
|
||||
return f(s)
|
||||
}
|
||||
}
|
||||
return runSTM(mkSTM(c, opts), apply)
|
||||
}
|
||||
|
||||
func mkSTM(c *v3.Client, opts *stmOptions) STM {
|
||||
switch opts.iso {
|
||||
case SerializableSnapshot:
|
||||
s := &stmSerializable{
|
||||
stm: stm{client: c, ctx: opts.ctx},
|
||||
prefetch: make(map[string]*v3.GetResponse),
|
||||
}
|
||||
s.conflicts = func() []v3.Cmp {
|
||||
return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
|
||||
}
|
||||
return s
|
||||
case Serializable:
|
||||
s := &stmSerializable{
|
||||
stm: stm{client: c, ctx: opts.ctx},
|
||||
prefetch: make(map[string]*v3.GetResponse),
|
||||
}
|
||||
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
|
||||
return s
|
||||
case RepeatableReads:
|
||||
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
|
||||
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
|
||||
return s
|
||||
case ReadCommitted:
|
||||
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
|
||||
s.conflicts = func() []v3.Cmp { return nil }
|
||||
return s
|
||||
default:
|
||||
panic("unsupported stm")
|
||||
}
|
||||
}
|
||||
|
||||
type stmResponse struct {
|
||||
resp *v3.TxnResponse
|
||||
err error
|
||||
}
|
||||
|
||||
func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||
outc := make(chan stmResponse, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
e, ok := r.(stmError)
|
||||
if !ok {
|
||||
// client apply panicked
|
||||
panic(r)
|
||||
}
|
||||
outc <- stmResponse{nil, e.err}
|
||||
}
|
||||
}()
|
||||
var out stmResponse
|
||||
for {
|
||||
s.reset()
|
||||
if out.err = apply(s); out.err != nil {
|
||||
break
|
||||
}
|
||||
if out.resp = s.commit(); out.resp != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
outc <- out
|
||||
}()
|
||||
r := <-outc
|
||||
return r.resp, r.err
|
||||
}
|
||||
|
||||
// stm implements repeatable-read software transactional memory over etcd
|
||||
type stm struct {
|
||||
client *v3.Client
|
||||
ctx context.Context
|
||||
// rset holds read key values and revisions
|
||||
rset readSet
|
||||
// wset holds overwritten keys and their values
|
||||
wset writeSet
|
||||
// getOpts are the opts used for gets
|
||||
getOpts []v3.OpOption
|
||||
// conflicts computes the current conflicts on the txn
|
||||
conflicts func() []v3.Cmp
|
||||
}
|
||||
|
||||
type stmPut struct {
|
||||
val string
|
||||
op v3.Op
|
||||
}
|
||||
|
||||
type readSet map[string]*v3.GetResponse
|
||||
|
||||
func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
|
||||
for i, resp := range txnresp.Responses {
|
||||
rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
|
||||
}
|
||||
}
|
||||
|
||||
// first returns the store revision from the first fetch
|
||||
func (rs readSet) first() int64 {
|
||||
ret := int64(math.MaxInt64 - 1)
|
||||
for _, resp := range rs {
|
||||
if rev := resp.Header.Revision; rev < ret {
|
||||
ret = rev
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// cmps guards the txn from updates to read set
|
||||
func (rs readSet) cmps() []v3.Cmp {
|
||||
cmps := make([]v3.Cmp, 0, len(rs))
|
||||
for k, rk := range rs {
|
||||
cmps = append(cmps, isKeyCurrent(k, rk))
|
||||
}
|
||||
return cmps
|
||||
}
|
||||
|
||||
type writeSet map[string]stmPut
|
||||
|
||||
func (ws writeSet) get(keys ...string) *stmPut {
|
||||
for _, key := range keys {
|
||||
if wv, ok := ws[key]; ok {
|
||||
return &wv
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// cmps returns a cmp list testing no writes have happened past rev
|
||||
func (ws writeSet) cmps(rev int64) []v3.Cmp {
|
||||
cmps := make([]v3.Cmp, 0, len(ws))
|
||||
for key := range ws {
|
||||
cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
|
||||
}
|
||||
return cmps
|
||||
}
|
||||
|
||||
// puts is the list of ops for all pending writes
|
||||
func (ws writeSet) puts() []v3.Op {
|
||||
puts := make([]v3.Op, 0, len(ws))
|
||||
for _, v := range ws {
|
||||
puts = append(puts, v.op)
|
||||
}
|
||||
return puts
|
||||
}
|
||||
|
||||
func (s *stm) Get(keys ...string) string {
|
||||
if wv := s.wset.get(keys...); wv != nil {
|
||||
return wv.val
|
||||
}
|
||||
return respToValue(s.fetch(keys...))
|
||||
}
|
||||
|
||||
func (s *stm) Put(key, val string, opts ...v3.OpOption) {
|
||||
s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
|
||||
}
|
||||
|
||||
func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
|
||||
|
||||
func (s *stm) Rev(key string) int64 {
|
||||
if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
|
||||
return resp.Kvs[0].ModRevision
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s *stm) commit() *v3.TxnResponse {
|
||||
txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
|
||||
if err != nil {
|
||||
panic(stmError{err})
|
||||
}
|
||||
if txnresp.Succeeded {
|
||||
return txnresp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stm) fetch(keys ...string) *v3.GetResponse {
|
||||
if len(keys) == 0 {
|
||||
return nil
|
||||
}
|
||||
ops := make([]v3.Op, len(keys))
|
||||
for i, key := range keys {
|
||||
if resp, ok := s.rset[key]; ok {
|
||||
return resp
|
||||
}
|
||||
ops[i] = v3.OpGet(key, s.getOpts...)
|
||||
}
|
||||
txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
|
||||
if err != nil {
|
||||
panic(stmError{err})
|
||||
}
|
||||
s.rset.add(keys, txnresp)
|
||||
return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
|
||||
}
|
||||
|
||||
func (s *stm) reset() {
|
||||
s.rset = make(map[string]*v3.GetResponse)
|
||||
s.wset = make(map[string]stmPut)
|
||||
}
|
||||
|
||||
type stmSerializable struct {
|
||||
stm
|
||||
prefetch map[string]*v3.GetResponse
|
||||
}
|
||||
|
||||
func (s *stmSerializable) Get(keys ...string) string {
|
||||
if wv := s.wset.get(keys...); wv != nil {
|
||||
return wv.val
|
||||
}
|
||||
firstRead := len(s.rset) == 0
|
||||
for _, key := range keys {
|
||||
if resp, ok := s.prefetch[key]; ok {
|
||||
delete(s.prefetch, key)
|
||||
s.rset[key] = resp
|
||||
}
|
||||
}
|
||||
resp := s.stm.fetch(keys...)
|
||||
if firstRead {
|
||||
// txn's base revision is defined by the first read
|
||||
s.getOpts = []v3.OpOption{
|
||||
v3.WithRev(resp.Header.Revision),
|
||||
v3.WithSerializable(),
|
||||
}
|
||||
}
|
||||
return respToValue(resp)
|
||||
}
|
||||
|
||||
func (s *stmSerializable) Rev(key string) int64 {
|
||||
s.Get(key)
|
||||
return s.stm.Rev(key)
|
||||
}
|
||||
|
||||
func (s *stmSerializable) gets() ([]string, []v3.Op) {
|
||||
keys := make([]string, 0, len(s.rset))
|
||||
ops := make([]v3.Op, 0, len(s.rset))
|
||||
for k := range s.rset {
|
||||
keys = append(keys, k)
|
||||
ops = append(ops, v3.OpGet(k))
|
||||
}
|
||||
return keys, ops
|
||||
}
|
||||
|
||||
func (s *stmSerializable) commit() *v3.TxnResponse {
|
||||
keys, getops := s.gets()
|
||||
txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
|
||||
// use Else to prefetch keys in case of conflict to save a round trip
|
||||
txnresp, err := txn.Else(getops...).Commit()
|
||||
if err != nil {
|
||||
panic(stmError{err})
|
||||
}
|
||||
if txnresp.Succeeded {
|
||||
return txnresp
|
||||
}
|
||||
// load prefetch with Else data
|
||||
s.rset.add(keys, txnresp)
|
||||
s.prefetch = s.rset
|
||||
s.getOpts = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
|
||||
if len(r.Kvs) != 0 {
|
||||
return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
|
||||
}
|
||||
return v3.Compare(v3.ModRevision(k), "=", 0)
|
||||
}
|
||||
|
||||
func respToValue(resp *v3.GetResponse) string {
|
||||
if resp == nil || len(resp.Kvs) == 0 {
|
||||
return ""
|
||||
}
|
||||
return string(resp.Kvs[0].Value)
|
||||
}
|
||||
|
||||
// NewSTMRepeatable is deprecated.
|
||||
func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
|
||||
}
|
||||
|
||||
// NewSTMSerializable is deprecated.
|
||||
func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
|
||||
}
|
||||
|
||||
// NewSTMReadCommitted is deprecated.
|
||||
func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
|
||||
}
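A hedged sketch of driving the STM above; the keys are placeholders, and strconv is assumed in addition to the packages vendored here.
// transfer moves amount between two counters; NewSTM re-runs apply until the
// read set is still current at commit time (serializable snapshot by default).
func transfer(cli *clientv3.Client, from, to string, amount int) error {
	apply := func(s concurrency.STM) error {
		src, _ := strconv.Atoi(s.Get(from))
		dst, _ := strconv.Atoi(s.Get(to))
		s.Put(from, strconv.Itoa(src-amount))
		s.Put(to, strconv.Itoa(dst+amount))
		return nil
	}
	_, err := concurrency.NewSTM(cli, apply, concurrency.WithPrefetch(from, to))
	return err
}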
|
107
vendor/github.com/coreos/etcd/clientv3/config.go
generated
vendored
@ -16,98 +16,47 @@ package clientv3
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/pkg/tlsutil"
|
||||
"github.com/ghodss/yaml"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
// Endpoints is a list of URLs
|
||||
Endpoints []string
|
||||
// Endpoints is a list of URLs.
|
||||
Endpoints []string `json:"endpoints"`
|
||||
|
||||
// AutoSyncInterval is the interval to update endpoints with its latest members.
|
||||
// 0 disables auto-sync. By default auto-sync is disabled.
|
||||
AutoSyncInterval time.Duration
|
||||
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
|
||||
|
||||
// DialTimeout is the timeout for failing to establish a connection.
|
||||
DialTimeout time.Duration
|
||||
DialTimeout time.Duration `json:"dial-timeout"`
|
||||
|
||||
// DialKeepAliveTime is the time in seconds after which client pings the server to see if
|
||||
// transport is alive.
|
||||
DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
|
||||
|
||||
// DialKeepAliveTimeout is the time in seconds that the client waits for a response for the
|
||||
// keep-alive probe. If the response is not received in this time, the connection is closed.
|
||||
DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
|
||||
|
||||
// TLS holds the client secure credentials, if any.
|
||||
TLS *tls.Config
|
||||
|
||||
// Username is a username for authentication
|
||||
Username string
|
||||
// Username is a user name for authentication.
|
||||
Username string `json:"username"`
|
||||
|
||||
// Password is a password for authentication
|
||||
Password string
|
||||
}
|
||||
|
||||
type yamlConfig struct {
|
||||
Endpoints []string `json:"endpoints"`
|
||||
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
|
||||
DialTimeout time.Duration `json:"dial-timeout"`
|
||||
InsecureTransport bool `json:"insecure-transport"`
|
||||
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
|
||||
Certfile string `json:"cert-file"`
|
||||
Keyfile string `json:"key-file"`
|
||||
CAfile string `json:"ca-file"`
|
||||
}
|
||||
|
||||
func configFromFile(fpath string) (*Config, error) {
|
||||
b, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
yc := &yamlConfig{}
|
||||
|
||||
err = yaml.Unmarshal(b, yc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := &Config{
|
||||
Endpoints: yc.Endpoints,
|
||||
AutoSyncInterval: yc.AutoSyncInterval,
|
||||
DialTimeout: yc.DialTimeout,
|
||||
}
|
||||
|
||||
if yc.InsecureTransport {
|
||||
cfg.TLS = nil
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
var (
|
||||
cert *tls.Certificate
|
||||
cp *x509.CertPool
|
||||
)
|
||||
|
||||
if yc.Certfile != "" && yc.Keyfile != "" {
|
||||
cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if yc.CAfile != "" {
|
||||
cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tlscfg := &tls.Config{
|
||||
MinVersion: tls.VersionTLS10,
|
||||
InsecureSkipVerify: yc.InsecureSkipTLSVerify,
|
||||
RootCAs: cp,
|
||||
}
|
||||
if cert != nil {
|
||||
tlscfg.Certificates = []tls.Certificate{*cert}
|
||||
}
|
||||
cfg.TLS = tlscfg
|
||||
|
||||
return cfg, nil
|
||||
// Password is a password for authentication.
|
||||
Password string `json:"password"`
|
||||
|
||||
// RejectOldCluster when set will refuse to create a client against an outdated cluster.
|
||||
RejectOldCluster bool `json:"reject-old-cluster"`
|
||||
|
||||
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
|
||||
DialOptions []grpc.DialOption
|
||||
|
||||
// Context is the default client context; it can be used to cancel grpc dial out and
|
||||
// other operations that do not have an explicit context.
|
||||
Context context.Context
|
||||
}
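The Config struct above gains DialKeepAliveTime/DialKeepAliveTimeout, RejectOldCluster, DialOptions, and Context in the 3.2 client. A minimal sketch of wiring them up, using only the public fields shown in this diff; endpoints and durations are placeholders.

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"google.golang.org/grpc"
)

func main() {
	cfg := clientv3.Config{
		Endpoints:   []string{"https://etcd-0:2379", "https://etcd-1:2379"}, // placeholders
		DialTimeout: 5 * time.Second,
		// New in 3.2: gRPC keepalive pings, so a dead endpoint is noticed
		// without waiting for an RPC to fail.
		DialKeepAliveTime:    30 * time.Second,
		DialKeepAliveTimeout: 10 * time.Second,
		// DialOptions are passed straight through to grpc.Dial.
		DialOptions: []grpc.DialOption{grpc.WithBlock()},
	}
	cli, err := clientv3.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```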
|
||||
|
2 vendor/github.com/coreos/etcd/clientv3/doc.go (generated, vendored)
@ -28,7 +28,7 @@
|
||||
// Make sure to close the client after using it. If the client is not closed, the
|
||||
// connection will have leaky goroutines.
|
||||
//
|
||||
// To specify client request timeout, pass context.WithTimeout to APIs:
|
||||
// To specify a client request timeout, wrap the context with context.WithTimeout:
|
||||
//
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
|
||||
|
627 vendor/github.com/coreos/etcd/clientv3/health_balancer.go (generated, vendored, new file)
@ -0,0 +1,627 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
minHealthRetryDuration = 3 * time.Second
|
||||
unknownService = "unknown service grpc.health.v1.Health"
|
||||
)
|
||||
|
||||
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
|
||||
// any active connection to endpoints at the time.
|
||||
// This error is returned only when opts.BlockingWait is true.
|
||||
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
|
||||
|
||||
type healthCheckFunc func(ep string) (bool, error)
|
||||
|
||||
type notifyMsg int
|
||||
|
||||
const (
|
||||
notifyReset notifyMsg = iota
|
||||
notifyNext
|
||||
)
|
||||
|
||||
// healthBalancer does the bare minimum to expose multiple eps
|
||||
// to the grpc reconnection code path
|
||||
type healthBalancer struct {
|
||||
// addrs are the client's endpoint addresses for grpc
|
||||
addrs []grpc.Address
|
||||
|
||||
// eps holds the raw endpoints from the client
|
||||
eps []string
|
||||
|
||||
// notifyCh notifies grpc of the set of addresses for connecting
|
||||
notifyCh chan []grpc.Address
|
||||
|
||||
// readyc closes once the first connection is up
|
||||
readyc chan struct{}
|
||||
readyOnce sync.Once
|
||||
|
||||
// healthCheck checks an endpoint's health.
|
||||
healthCheck healthCheckFunc
|
||||
healthCheckTimeout time.Duration
|
||||
|
||||
unhealthyMu sync.RWMutex
|
||||
unhealthyHostPorts map[string]time.Time
|
||||
|
||||
// mu protects all fields below.
|
||||
mu sync.RWMutex
|
||||
|
||||
// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
|
||||
upc chan struct{}
|
||||
|
||||
// downc closes when grpc calls down() on pinAddr
|
||||
downc chan struct{}
|
||||
|
||||
// stopc is closed to signal updateNotifyLoop should stop.
|
||||
stopc chan struct{}
|
||||
stopOnce sync.Once
|
||||
wg sync.WaitGroup
|
||||
|
||||
// donec closes when all goroutines are exited
|
||||
donec chan struct{}
|
||||
|
||||
// updateAddrsC notifies updateNotifyLoop to update addrs.
|
||||
updateAddrsC chan notifyMsg
|
||||
|
||||
// grpc issues TLS cert checks using the string passed into dial so
|
||||
// that string must be the host. To recover the full scheme://host URL,
|
||||
// have a map from hosts to the original endpoint.
|
||||
hostPort2ep map[string]string
|
||||
|
||||
// pinAddr is the currently pinned address; set to the empty string on
|
||||
// initialization and shutdown.
|
||||
pinAddr string
|
||||
|
||||
closed bool
|
||||
}
|
||||
|
||||
func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
|
||||
notifyCh := make(chan []grpc.Address)
|
||||
addrs := eps2addrs(eps)
|
||||
hb := &healthBalancer{
|
||||
addrs: addrs,
|
||||
eps: eps,
|
||||
notifyCh: notifyCh,
|
||||
readyc: make(chan struct{}),
|
||||
healthCheck: hc,
|
||||
unhealthyHostPorts: make(map[string]time.Time),
|
||||
upc: make(chan struct{}),
|
||||
stopc: make(chan struct{}),
|
||||
downc: make(chan struct{}),
|
||||
donec: make(chan struct{}),
|
||||
updateAddrsC: make(chan notifyMsg),
|
||||
hostPort2ep: getHostPort2ep(eps),
|
||||
}
|
||||
if timeout < minHealthRetryDuration {
|
||||
timeout = minHealthRetryDuration
|
||||
}
|
||||
hb.healthCheckTimeout = timeout
|
||||
|
||||
close(hb.downc)
|
||||
go hb.updateNotifyLoop()
|
||||
hb.wg.Add(1)
|
||||
go func() {
|
||||
defer hb.wg.Done()
|
||||
hb.updateUnhealthy()
|
||||
}()
|
||||
return hb
|
||||
}
|
||||
|
||||
func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
|
||||
|
||||
func (b *healthBalancer) ConnectNotify() <-chan struct{} {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.upc
|
||||
}
|
||||
|
||||
func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
|
||||
|
||||
func (b *healthBalancer) endpoint(hostPort string) string {
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
return b.hostPort2ep[hostPort]
|
||||
}
|
||||
|
||||
func (b *healthBalancer) pinned() string {
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
return b.pinAddr
|
||||
}
|
||||
|
||||
func (b *healthBalancer) hostPortError(hostPort string, err error) {
|
||||
if b.endpoint(hostPort) == "" {
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
b.unhealthyMu.Lock()
|
||||
b.unhealthyHostPorts[hostPort] = time.Now()
|
||||
b.unhealthyMu.Unlock()
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
|
||||
if b.endpoint(hostPort) == "" {
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
b.unhealthyMu.Lock()
|
||||
delete(b.unhealthyHostPorts, hostPort)
|
||||
b.unhealthyMu.Unlock()
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *healthBalancer) countUnhealthy() (count int) {
|
||||
b.unhealthyMu.RLock()
|
||||
count = len(b.unhealthyHostPorts)
|
||||
b.unhealthyMu.RUnlock()
|
||||
return count
|
||||
}
|
||||
|
||||
func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
|
||||
b.unhealthyMu.RLock()
|
||||
_, unhealthy = b.unhealthyHostPorts[hostPort]
|
||||
b.unhealthyMu.RUnlock()
|
||||
return unhealthy
|
||||
}
|
||||
|
||||
func (b *healthBalancer) cleanupUnhealthy() {
|
||||
b.unhealthyMu.Lock()
|
||||
for k, v := range b.unhealthyHostPorts {
|
||||
if time.Since(v) > b.healthCheckTimeout {
|
||||
delete(b.unhealthyHostPorts, k)
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
|
||||
}
|
||||
}
|
||||
}
|
||||
b.unhealthyMu.Unlock()
|
||||
}
|
||||
|
||||
func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
|
||||
unhealthyCnt := b.countUnhealthy()
|
||||
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
|
||||
hbAddrs := b.addrs
|
||||
if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
|
||||
liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
|
||||
for k := range b.hostPort2ep {
|
||||
liveHostPorts[k] = struct{}{}
|
||||
}
|
||||
return hbAddrs, liveHostPorts
|
||||
}
|
||||
|
||||
addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
|
||||
liveHostPorts := make(map[string]struct{}, len(addrs))
|
||||
for _, addr := range b.addrs {
|
||||
if !b.isUnhealthy(addr.Addr) {
|
||||
addrs = append(addrs, addr)
|
||||
liveHostPorts[addr.Addr] = struct{}{}
|
||||
}
|
||||
}
|
||||
return addrs, liveHostPorts
|
||||
}
|
||||
|
||||
func (b *healthBalancer) updateUnhealthy() {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(b.healthCheckTimeout):
|
||||
b.cleanupUnhealthy()
|
||||
pinned := b.pinned()
|
||||
if pinned == "" || b.isUnhealthy(pinned) {
|
||||
select {
|
||||
case b.updateAddrsC <- notifyNext:
|
||||
case <-b.stopc:
|
||||
return
|
||||
}
|
||||
}
|
||||
case <-b.stopc:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *healthBalancer) updateAddrs(eps ...string) {
|
||||
np := getHostPort2ep(eps)
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
match := len(np) == len(b.hostPort2ep)
|
||||
if match {
|
||||
for k, v := range np {
|
||||
if b.hostPort2ep[k] != v {
|
||||
match = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if match {
|
||||
// same endpoints, so no need to update address
|
||||
return
|
||||
}
|
||||
|
||||
b.hostPort2ep = np
|
||||
b.addrs, b.eps = eps2addrs(eps), eps
|
||||
|
||||
b.unhealthyMu.Lock()
|
||||
b.unhealthyHostPorts = make(map[string]time.Time)
|
||||
b.unhealthyMu.Unlock()
|
||||
}
|
||||
|
||||
func (b *healthBalancer) next() {
|
||||
b.mu.RLock()
|
||||
downc := b.downc
|
||||
b.mu.RUnlock()
|
||||
select {
|
||||
case b.updateAddrsC <- notifyNext:
|
||||
case <-b.stopc:
|
||||
}
|
||||
// wait until disconnect so new RPCs are not issued on old connection
|
||||
select {
|
||||
case <-downc:
|
||||
case <-b.stopc:
|
||||
}
|
||||
}
|
||||
|
||||
func (b *healthBalancer) updateNotifyLoop() {
|
||||
defer close(b.donec)
|
||||
|
||||
for {
|
||||
b.mu.RLock()
|
||||
upc, downc, addr := b.upc, b.downc, b.pinAddr
|
||||
b.mu.RUnlock()
|
||||
// downc or upc should be closed
|
||||
select {
|
||||
case <-downc:
|
||||
downc = nil
|
||||
default:
|
||||
}
|
||||
select {
|
||||
case <-upc:
|
||||
upc = nil
|
||||
default:
|
||||
}
|
||||
switch {
|
||||
case downc == nil && upc == nil:
|
||||
// stale
|
||||
select {
|
||||
case <-b.stopc:
|
||||
return
|
||||
default:
|
||||
}
|
||||
case downc == nil:
|
||||
b.notifyAddrs(notifyReset)
|
||||
select {
|
||||
case <-upc:
|
||||
case msg := <-b.updateAddrsC:
|
||||
b.notifyAddrs(msg)
|
||||
case <-b.stopc:
|
||||
return
|
||||
}
|
||||
case upc == nil:
|
||||
select {
|
||||
// close connections that are not the pinned address
|
||||
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
|
||||
case <-downc:
|
||||
case <-b.stopc:
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-downc:
|
||||
b.notifyAddrs(notifyReset)
|
||||
case msg := <-b.updateAddrsC:
|
||||
b.notifyAddrs(msg)
|
||||
case <-b.stopc:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
|
||||
if msg == notifyNext {
|
||||
select {
|
||||
case b.notifyCh <- []grpc.Address{}:
|
||||
case <-b.stopc:
|
||||
return
|
||||
}
|
||||
}
|
||||
b.mu.RLock()
|
||||
pinAddr := b.pinAddr
|
||||
downc := b.downc
|
||||
b.mu.RUnlock()
|
||||
addrs, hostPorts := b.liveAddrs()
|
||||
|
||||
var waitDown bool
|
||||
if pinAddr != "" {
|
||||
_, ok := hostPorts[pinAddr]
|
||||
waitDown = !ok
|
||||
}
|
||||
|
||||
select {
|
||||
case b.notifyCh <- addrs:
|
||||
if waitDown {
|
||||
select {
|
||||
case <-downc:
|
||||
case <-b.stopc:
|
||||
}
|
||||
}
|
||||
case <-b.stopc:
|
||||
}
|
||||
}
|
||||
|
||||
func (b *healthBalancer) Up(addr grpc.Address) func(error) {
|
||||
if !b.mayPin(addr) {
|
||||
return func(err error) {}
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
// gRPC might call Up after it called Close. We add this check
|
||||
// to "fix" it up at application layer. Otherwise, will panic
|
||||
// if b.upc is already closed.
|
||||
if b.closed {
|
||||
return func(err error) {}
|
||||
}
|
||||
|
||||
// gRPC might call Up on a stale address.
|
||||
// Prevent updating pinAddr with a stale address.
|
||||
if !hasAddr(b.addrs, addr.Addr) {
|
||||
return func(err error) {}
|
||||
}
|
||||
|
||||
if b.pinAddr != "" {
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
|
||||
}
|
||||
return func(err error) {}
|
||||
}
|
||||
|
||||
// notify waiting Get()s and pin first connected address
|
||||
close(b.upc)
|
||||
b.downc = make(chan struct{})
|
||||
b.pinAddr = addr.Addr
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: pin %q", addr.Addr)
|
||||
}
|
||||
|
||||
// notify client that a connection is up
|
||||
b.readyOnce.Do(func() { close(b.readyc) })
|
||||
|
||||
return func(err error) {
|
||||
// If connected to a black hole endpoint or a killed server, the gRPC ping
|
||||
// timeout will induce a network I/O error, and retrying until success;
|
||||
// finding healthy endpoint on retry could take several timeouts and redials.
|
||||
// To avoid wasting retries, gray-list unhealthy endpoints.
|
||||
b.hostPortError(addr.Addr, err)
|
||||
|
||||
b.mu.Lock()
|
||||
b.upc = make(chan struct{})
|
||||
close(b.downc)
|
||||
b.pinAddr = ""
|
||||
b.mu.Unlock()
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *healthBalancer) mayPin(addr grpc.Address) bool {
|
||||
if b.endpoint(addr.Addr) == "" { // stale host:port
|
||||
return false
|
||||
}
|
||||
|
||||
b.unhealthyMu.RLock()
|
||||
unhealthyCnt := len(b.unhealthyHostPorts)
|
||||
failedTime, bad := b.unhealthyHostPorts[addr.Addr]
|
||||
b.unhealthyMu.RUnlock()
|
||||
|
||||
b.mu.RLock()
|
||||
skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
|
||||
b.mu.RUnlock()
|
||||
if skip || !bad {
|
||||
return true
|
||||
}
|
||||
|
||||
// prevent isolated member's endpoint from being infinitely retried, as follows:
|
||||
// 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm
|
||||
// 2. balancer 'Up' unpins with grpc: failed with network I/O error
|
||||
// 3. grpc-healthcheck still SERVING, thus retry to pin
|
||||
// instead, return before grpc-healthcheck if failed within healthcheck timeout
|
||||
if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if ok, _ := b.healthCheck(addr.Addr); ok {
|
||||
b.removeUnhealthy(addr.Addr, "health check success")
|
||||
return true
|
||||
}
|
||||
|
||||
b.hostPortError(addr.Addr, errors.New("health check failed"))
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
|
||||
var (
|
||||
addr string
|
||||
closed bool
|
||||
)
|
||||
|
||||
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
|
||||
// an address it has notified via Notify immediately instead of blocking.
|
||||
if !opts.BlockingWait {
|
||||
b.mu.RLock()
|
||||
closed = b.closed
|
||||
addr = b.pinAddr
|
||||
b.mu.RUnlock()
|
||||
if closed {
|
||||
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||
}
|
||||
if addr == "" {
|
||||
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
|
||||
}
|
||||
return grpc.Address{Addr: addr}, func() {}, nil
|
||||
}
|
||||
|
||||
for {
|
||||
b.mu.RLock()
|
||||
ch := b.upc
|
||||
b.mu.RUnlock()
|
||||
select {
|
||||
case <-ch:
|
||||
case <-b.donec:
|
||||
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||
case <-ctx.Done():
|
||||
return grpc.Address{Addr: ""}, nil, ctx.Err()
|
||||
}
|
||||
b.mu.RLock()
|
||||
closed = b.closed
|
||||
addr = b.pinAddr
|
||||
b.mu.RUnlock()
|
||||
// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
|
||||
if closed {
|
||||
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
|
||||
}
|
||||
if addr != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
return grpc.Address{Addr: addr}, func() {}, nil
|
||||
}
|
||||
|
||||
func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
|
||||
|
||||
func (b *healthBalancer) Close() error {
|
||||
b.mu.Lock()
|
||||
// In case gRPC calls close twice. TODO: remove the checking
|
||||
// when we are sure that gRPC wont call close twice.
|
||||
if b.closed {
|
||||
b.mu.Unlock()
|
||||
<-b.donec
|
||||
return nil
|
||||
}
|
||||
b.closed = true
|
||||
b.stopOnce.Do(func() { close(b.stopc) })
|
||||
b.pinAddr = ""
|
||||
|
||||
// In the case of following scenario:
|
||||
// 1. upc is not closed; no pinned address
|
||||
// 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
|
||||
// 3. client.conn.Close() calls balancer.Close(); closed = true
|
||||
// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
|
||||
// we must close upc so Get() exits from blocking on upc
|
||||
select {
|
||||
case <-b.upc:
|
||||
default:
|
||||
// terminate all waiting Get()s
|
||||
close(b.upc)
|
||||
}
|
||||
|
||||
b.mu.Unlock()
|
||||
b.wg.Wait()
|
||||
|
||||
// wait for updateNotifyLoop to finish
|
||||
<-b.donec
|
||||
close(b.notifyCh)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func grpcHealthCheck(client *Client, ep string) (bool, error) {
|
||||
conn, err := client.dial(ep)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer conn.Close()
|
||||
cli := healthpb.NewHealthClient(conn)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
|
||||
cancel()
|
||||
if err != nil {
|
||||
if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
|
||||
if s.Message() == unknownService { // etcd < v3.3.0
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
|
||||
}
|
||||
|
||||
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
|
||||
for _, addr := range addrs {
|
||||
if targetAddr == addr.Addr {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getHost(ep string) string {
|
||||
url, uerr := url.Parse(ep)
|
||||
if uerr != nil || !strings.Contains(ep, "://") {
|
||||
return ep
|
||||
}
|
||||
return url.Host
|
||||
}
|
||||
|
||||
func eps2addrs(eps []string) []grpc.Address {
|
||||
addrs := make([]grpc.Address, len(eps))
|
||||
for i := range eps {
|
||||
addrs[i].Addr = getHost(eps[i])
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
|
||||
func getHostPort2ep(eps []string) map[string]string {
|
||||
hm := make(map[string]string, len(eps))
|
||||
for i := range eps {
|
||||
_, host, _ := parseEndpoint(eps[i])
|
||||
hm[host] = eps[i]
|
||||
}
|
||||
return hm
|
||||
}
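The grpcHealthCheck helper above is the standard gRPC health-checking protocol plus an allowance for servers older than v3.3.0 that do not register the health service. For reference, a standalone sketch of the same probe; the address and insecure transport are illustrative.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure()) // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// An empty service name asks about the server's overall health, which is
	// what the balancer's per-endpoint health check issues.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("serving:", resp.Status == healthpb.HealthCheckResponse_SERVING)
}
```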
|
54 vendor/github.com/coreos/etcd/clientv3/kv.go (generated, vendored)
@ -16,8 +16,8 @@ package clientv3
|
||||
|
||||
import (
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -32,7 +32,7 @@ type KV interface {
|
||||
// Put puts a key-value pair into etcd.
|
||||
// Note that key,value can be plain bytes array and string is
|
||||
// an immutable representation of that bytes array.
|
||||
// To get a string of bytes, do string([]byte(0x10, 0x20)).
|
||||
// To get a string of bytes, do string([]byte{0x10, 0x20}).
|
||||
Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
|
||||
|
||||
// Get retrieves keys.
|
||||
@ -51,11 +51,6 @@ type KV interface {
|
||||
// Compact compacts etcd KV history before the given rev.
|
||||
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
|
||||
|
||||
// Do applies a single Op on KV without a transaction.
|
||||
// Do is useful when declaring operations to be issued at a later time
|
||||
// whereas Get/Put/Delete are for better suited for when the operation
|
||||
// should be immediately issued at time of declaration.
|
||||
|
||||
// Do applies a single Op on KV without a transaction.
|
||||
// Do is useful when creating arbitrary operations to be issued at a
|
||||
// later time; the user can range over the operations, calling Do to
|
||||
@ -71,11 +66,26 @@ type OpResponse struct {
|
||||
put *PutResponse
|
||||
get *GetResponse
|
||||
del *DeleteResponse
|
||||
txn *TxnResponse
|
||||
}
|
||||
|
||||
func (op OpResponse) Put() *PutResponse { return op.put }
|
||||
func (op OpResponse) Get() *GetResponse { return op.get }
|
||||
func (op OpResponse) Del() *DeleteResponse { return op.del }
|
||||
func (op OpResponse) Txn() *TxnResponse { return op.txn }
|
||||
|
||||
func (resp *PutResponse) OpResponse() OpResponse {
|
||||
return OpResponse{put: resp}
|
||||
}
|
||||
func (resp *GetResponse) OpResponse() OpResponse {
|
||||
return OpResponse{get: resp}
|
||||
}
|
||||
func (resp *DeleteResponse) OpResponse() OpResponse {
|
||||
return OpResponse{del: resp}
|
||||
}
|
||||
func (resp *TxnResponse) OpResponse() OpResponse {
|
||||
return OpResponse{txn: resp}
|
||||
}
|
||||
|
||||
type kv struct {
|
||||
remote pb.KVClient
|
||||
@ -120,35 +130,17 @@ func (kv *kv) Txn(ctx context.Context) Txn {
|
||||
}
|
||||
|
||||
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
|
||||
for {
|
||||
resp, err := kv.do(ctx, op)
|
||||
if err == nil {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
if isHaltErr(ctx, err) {
|
||||
return resp, toErr(ctx, err)
|
||||
}
|
||||
// do not retry on modifications
|
||||
if op.isWrite() {
|
||||
return resp, toErr(ctx, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
|
||||
var err error
|
||||
switch op.t {
|
||||
// TODO: handle other ops
|
||||
case tRange:
|
||||
var resp *pb.RangeResponse
|
||||
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
|
||||
resp, err = kv.remote.Range(ctx, op.toRangeRequest())
|
||||
if err == nil {
|
||||
return OpResponse{get: (*GetResponse)(resp)}, nil
|
||||
}
|
||||
case tPut:
|
||||
var resp *pb.PutResponse
|
||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
|
||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
|
||||
resp, err = kv.remote.Put(ctx, r)
|
||||
if err == nil {
|
||||
return OpResponse{put: (*PutResponse)(resp)}, nil
|
||||
@ -160,8 +152,14 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
|
||||
if err == nil {
|
||||
return OpResponse{del: (*DeleteResponse)(resp)}, nil
|
||||
}
|
||||
case tTxn:
|
||||
var resp *pb.TxnResponse
|
||||
resp, err = kv.remote.Txn(ctx, op.toTxnRequest())
|
||||
if err == nil {
|
||||
return OpResponse{txn: (*TxnResponse)(resp)}, nil
|
||||
}
|
||||
default:
|
||||
panic("Unknown op")
|
||||
}
|
||||
return OpResponse{}, err
|
||||
return OpResponse{}, toErr(ctx, err)
|
||||
}
|
||||
|
257 vendor/github.com/coreos/etcd/clientv3/lease.go (generated, vendored)
@ -20,8 +20,9 @@ import (
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -29,7 +30,7 @@ type (
|
||||
LeaseID int64
|
||||
)
|
||||
|
||||
// LeaseGrantResponse is used to convert the protobuf grant response.
|
||||
// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
|
||||
type LeaseGrantResponse struct {
|
||||
*pb.ResponseHeader
|
||||
ID LeaseID
|
||||
@ -37,14 +38,14 @@ type LeaseGrantResponse struct {
|
||||
Error string
|
||||
}
|
||||
|
||||
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
|
||||
// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
|
||||
type LeaseKeepAliveResponse struct {
|
||||
*pb.ResponseHeader
|
||||
ID LeaseID
|
||||
TTL int64
|
||||
}
|
||||
|
||||
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
|
||||
// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
|
||||
type LeaseTimeToLiveResponse struct {
|
||||
*pb.ResponseHeader
|
||||
ID LeaseID `json:"id"`
|
||||
@ -59,6 +60,12 @@ type LeaseTimeToLiveResponse struct {
|
||||
Keys [][]byte `json:"keys"`
|
||||
}
|
||||
|
||||
// LeaseStatus represents a lease status.
|
||||
type LeaseStatus struct {
|
||||
ID LeaseID `json:"id"`
|
||||
// TODO: TTL int64
|
||||
}
|
||||
|
||||
const (
|
||||
// defaultTTL is the assumed lease TTL used for the first keepalive
|
||||
// deadline before the actual TTL is known to the client.
|
||||
@ -67,6 +74,9 @@ const (
|
||||
leaseResponseChSize = 16
|
||||
// NoLease is a lease ID for the absence of a lease.
|
||||
NoLease LeaseID = 0
|
||||
|
||||
// retryConnWait is how long to wait before retrying request due to an error
|
||||
retryConnWait = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
|
||||
@ -97,7 +107,7 @@ type Lease interface {
|
||||
// KeepAlive keeps the given lease alive forever.
|
||||
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
|
||||
|
||||
// KeepAliveOnce renews the lease once. In most of the cases, Keepalive
|
||||
// KeepAliveOnce renews the lease once. In most of the cases, KeepAlive
|
||||
// should be used instead of KeepAliveOnce.
|
||||
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
|
||||
|
||||
@ -126,6 +136,9 @@ type lessor struct {
|
||||
// firstKeepAliveTimeout is the timeout for the first keepalive request
|
||||
// before the actual TTL is known to the lease client
|
||||
firstKeepAliveTimeout time.Duration
|
||||
|
||||
// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
|
||||
firstKeepAliveOnce sync.Once
|
||||
}
|
||||
|
||||
// keepAlive multiplexes a keepalive for a lease over multiple channels
|
||||
@ -141,85 +154,62 @@ type keepAlive struct {
|
||||
}
|
||||
|
||||
func NewLease(c *Client) Lease {
|
||||
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
|
||||
}
|
||||
|
||||
func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
|
||||
l := &lessor{
|
||||
donec: make(chan struct{}),
|
||||
keepAlives: make(map[LeaseID]*keepAlive),
|
||||
remote: RetryLeaseClient(c),
|
||||
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
|
||||
remote: remote,
|
||||
firstKeepAliveTimeout: keepAliveTimeout,
|
||||
}
|
||||
if l.firstKeepAliveTimeout == time.Second {
|
||||
l.firstKeepAliveTimeout = defaultTTL
|
||||
}
|
||||
|
||||
l.stopCtx, l.stopCancel = context.WithCancel(context.Background())
|
||||
go l.recvKeepAliveLoop()
|
||||
go l.deadlineLoop()
|
||||
reqLeaderCtx := WithRequireLeader(context.Background())
|
||||
l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
done := cancelWhenStop(cancel, l.stopCtx.Done())
|
||||
defer close(done)
|
||||
|
||||
for {
|
||||
r := &pb.LeaseGrantRequest{TTL: ttl}
|
||||
resp, err := l.remote.LeaseGrant(cctx, r)
|
||||
if err == nil {
|
||||
gresp := &LeaseGrantResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
Error: resp.Error,
|
||||
}
|
||||
return gresp, nil
|
||||
}
|
||||
if isHaltErr(cctx, err) {
|
||||
return nil, toErr(cctx, err)
|
||||
r := &pb.LeaseGrantRequest{TTL: ttl}
|
||||
resp, err := l.remote.LeaseGrant(ctx, r)
|
||||
if err == nil {
|
||||
gresp := &LeaseGrantResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
Error: resp.Error,
|
||||
}
|
||||
return gresp, nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
done := cancelWhenStop(cancel, l.stopCtx.Done())
|
||||
defer close(done)
|
||||
|
||||
for {
|
||||
r := &pb.LeaseRevokeRequest{ID: int64(id)}
|
||||
resp, err := l.remote.LeaseRevoke(cctx, r)
|
||||
|
||||
if err == nil {
|
||||
return (*LeaseRevokeResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
r := &pb.LeaseRevokeRequest{ID: int64(id)}
|
||||
resp, err := l.remote.LeaseRevoke(ctx, r)
|
||||
if err == nil {
|
||||
return (*LeaseRevokeResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
done := cancelWhenStop(cancel, l.stopCtx.Done())
|
||||
defer close(done)
|
||||
|
||||
for {
|
||||
r := toLeaseTimeToLiveRequest(id, opts...)
|
||||
resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
gresp := &LeaseTimeToLiveResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
GrantedTTL: resp.GrantedTTL,
|
||||
Keys: resp.Keys,
|
||||
}
|
||||
return gresp, nil
|
||||
}
|
||||
if isHaltErr(cctx, err) {
|
||||
return nil, toErr(cctx, err)
|
||||
r := toLeaseTimeToLiveRequest(id, opts...)
|
||||
resp, err := l.remote.LeaseTimeToLive(ctx, r)
|
||||
if err == nil {
|
||||
gresp := &LeaseTimeToLiveResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
GrantedTTL: resp.GrantedTTL,
|
||||
Keys: resp.Keys,
|
||||
}
|
||||
return gresp, nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
|
||||
@ -254,19 +244,19 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
|
||||
l.mu.Unlock()
|
||||
|
||||
go l.keepAliveCtxCloser(id, ctx, ka.donec)
|
||||
l.firstKeepAliveOnce.Do(func() {
|
||||
go l.recvKeepAliveLoop()
|
||||
go l.deadlineLoop()
|
||||
})
|
||||
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
done := cancelWhenStop(cancel, l.stopCtx.Done())
|
||||
defer close(done)
|
||||
|
||||
for {
|
||||
resp, err := l.keepAliveOnce(cctx, id)
|
||||
resp, err := l.keepAliveOnce(ctx, id)
|
||||
if err == nil {
|
||||
if resp.TTL == 0 {
|
||||
if resp.TTL <= 0 {
|
||||
err = rpctypes.ErrLeaseNotFound
|
||||
}
|
||||
return resp, err
|
||||
@ -279,6 +269,8 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
|
||||
|
||||
func (l *lessor) Close() error {
|
||||
l.stopCancel()
|
||||
// close for synchronous teardown if stream goroutines never launched
|
||||
l.firstKeepAliveOnce.Do(func() { close(l.donec) })
|
||||
<-l.donec
|
||||
return nil
|
||||
}
|
||||
@ -315,11 +307,50 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha
|
||||
}
|
||||
}
|
||||
|
||||
// closeRequireLeader scans keepAlives for ctxs that have require leader
|
||||
// and closes the associated channels.
|
||||
func (l *lessor) closeRequireLeader() {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
for _, ka := range l.keepAlives {
|
||||
reqIdxs := 0
|
||||
// find all required leader channels, close, mark as nil
|
||||
for i, ctx := range ka.ctxs {
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
ks := md[rpctypes.MetadataRequireLeaderKey]
|
||||
if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
|
||||
continue
|
||||
}
|
||||
close(ka.chs[i])
|
||||
ka.chs[i] = nil
|
||||
reqIdxs++
|
||||
}
|
||||
if reqIdxs == 0 {
|
||||
continue
|
||||
}
|
||||
// remove all channels that required a leader from keepalive
|
||||
newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
|
||||
newCtxs := make([]context.Context, len(newChs))
|
||||
newIdx := 0
|
||||
for i := range ka.chs {
|
||||
if ka.chs[i] == nil {
|
||||
continue
|
||||
}
|
||||
newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx]
|
||||
newIdx++
|
||||
}
|
||||
ka.chs, ka.ctxs = newChs, newCtxs
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
|
||||
stream, err := l.remote.LeaseKeepAlive(cctx)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
@ -348,32 +379,50 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
|
||||
close(l.donec)
|
||||
l.loopErr = gerr
|
||||
for _, ka := range l.keepAlives {
|
||||
ka.Close()
|
||||
ka.close()
|
||||
}
|
||||
l.keepAlives = make(map[LeaseID]*keepAlive)
|
||||
l.mu.Unlock()
|
||||
}()
|
||||
|
||||
stream, serr := l.resetRecv()
|
||||
for serr == nil {
|
||||
resp, err := stream.Recv()
|
||||
for {
|
||||
stream, err := l.resetRecv()
|
||||
if err != nil {
|
||||
if isHaltErr(l.stopCtx, err) {
|
||||
if canceledByCaller(l.stopCtx, err) {
|
||||
return err
|
||||
}
|
||||
stream, serr = l.resetRecv()
|
||||
continue
|
||||
} else {
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
if canceledByCaller(l.stopCtx, err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
|
||||
l.closeRequireLeader()
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
l.recvKeepAlive(resp)
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(retryConnWait):
|
||||
continue
|
||||
case <-l.stopCtx.Done():
|
||||
return l.stopCtx.Err()
|
||||
}
|
||||
l.recvKeepAlive(resp)
|
||||
}
|
||||
return serr
|
||||
}
|
||||
|
||||
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
|
||||
// resetRecv opens a new lease stream and starts sending keep alive requests.
|
||||
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
|
||||
sctx, cancel := context.WithCancel(l.stopCtx)
|
||||
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
|
||||
if err = toErr(sctx, err); err != nil {
|
||||
stream, err := l.remote.LeaseKeepAlive(sctx)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
@ -381,7 +430,6 @@ func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
if l.stream != nil && l.streamCancel != nil {
|
||||
l.stream.CloseSend()
|
||||
l.streamCancel()
|
||||
}
|
||||
|
||||
@ -411,7 +459,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
|
||||
if karesp.TTL <= 0 {
|
||||
// lease expired; close all keep alive channels
|
||||
delete(l.keepAlives, karesp.ID)
|
||||
ka.Close()
|
||||
ka.close()
|
||||
return
|
||||
}
|
||||
|
||||
@ -441,7 +489,7 @@ func (l *lessor) deadlineLoop() {
|
||||
for id, ka := range l.keepAlives {
|
||||
if ka.deadline.Before(now) {
|
||||
// waited too long for response; lease may be expired
|
||||
ka.Close()
|
||||
ka.close()
|
||||
delete(l.keepAlives, id)
|
||||
}
|
||||
}
|
||||
@ -449,19 +497,9 @@ func (l *lessor) deadlineLoop() {
|
||||
}
|
||||
}
|
||||
|
||||
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
|
||||
// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
|
||||
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
case <-stream.Context().Done():
|
||||
return
|
||||
case <-l.donec:
|
||||
return
|
||||
case <-l.stopCtx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
var tosend []LeaseID
|
||||
|
||||
now := time.Now()
|
||||
@ -480,29 +518,22 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
case <-stream.Context().Done():
|
||||
return
|
||||
case <-l.donec:
|
||||
return
|
||||
case <-l.stopCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ka *keepAlive) Close() {
|
||||
func (ka *keepAlive) close() {
|
||||
close(ka.donec)
|
||||
for _, ch := range ka.chs {
|
||||
close(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done
|
||||
// should be closed when the work is finished. When done fires, cancelWhenStop will release
|
||||
// its internal resource.
|
||||
func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} {
|
||||
done := make(chan struct{}, 1)
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case <-stopc:
|
||||
case <-done:
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
|
||||
return done
|
||||
}
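The lessor changes above (KeepAlive lazily starting the stream via firstKeepAliveOnce, retryConnWait, closeRequireLeader) are internal; the public flow they serve is unchanged. A sketch of that flow, with the TTL and key illustrative.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Grant a 10s lease, attach a key to it, and keep it alive; the keepalive
	// stream is started on the first KeepAlive call per the diff above.
	grant, err := cli.Grant(context.Background(), 10)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(context.Background(), "sample_key", "sample_value", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}
	ch, err := cli.KeepAlive(context.Background(), grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	for ka := range ch { // channel closes if the lease expires or the client stops
		fmt.Println("renewed, ttl:", ka.TTL)
	}
}
```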
|
||||
|
34 vendor/github.com/coreos/etcd/clientv3/logger.go (generated, vendored)
@ -16,36 +16,35 @@ package clientv3
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// Logger is the logger used by client library.
|
||||
// It implements grpclog.Logger interface.
|
||||
type Logger grpclog.Logger
|
||||
// It implements grpclog.LoggerV2 interface.
|
||||
type Logger grpclog.LoggerV2
|
||||
|
||||
var (
|
||||
logger settableLogger
|
||||
)
|
||||
|
||||
type settableLogger struct {
|
||||
l grpclog.Logger
|
||||
l grpclog.LoggerV2
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func init() {
|
||||
// disable client side logs by default
|
||||
logger.mu.Lock()
|
||||
logger.l = log.New(ioutil.Discard, "", 0)
|
||||
logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)
|
||||
|
||||
// logger has to override the grpclog at initialization so that
|
||||
// any changes to the grpclog go through logger with locking
|
||||
// instead of through SetLogger
|
||||
//
|
||||
// now updates only happen through settableLogger.set
|
||||
grpclog.SetLogger(&logger)
|
||||
grpclog.SetLoggerV2(&logger)
|
||||
logger.mu.Unlock()
|
||||
}
|
||||
|
||||
@ -62,6 +61,7 @@ func GetLogger() Logger {
|
||||
func (s *settableLogger) set(l Logger) {
|
||||
s.mu.Lock()
|
||||
logger.l = l
|
||||
grpclog.SetLoggerV2(&logger)
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
@ -72,11 +72,25 @@ func (s *settableLogger) get() Logger {
|
||||
return l
|
||||
}
|
||||
|
||||
// implement the grpclog.Logger interface
|
||||
// implement the grpclog.LoggerV2 interface
|
||||
|
||||
func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
|
||||
func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
|
||||
func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
|
||||
func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
|
||||
func (s *settableLogger) Warningf(format string, args ...interface{}) {
|
||||
s.get().Warningf(format, args...)
|
||||
}
|
||||
func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
|
||||
func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
|
||||
func (s *settableLogger) Errorf(format string, args ...interface{}) {
|
||||
s.get().Errorf(format, args...)
|
||||
}
|
||||
func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
|
||||
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
|
||||
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
|
||||
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
|
||||
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
|
||||
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
|
||||
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
|
||||
func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
|
||||
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
|
||||
func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
|
||||
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
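Because the Logger type above now aliases grpclog.LoggerV2 (with Print* forwarded to Info*), callers that previously handed clientv3.SetLogger a grpclog.Logger need a LoggerV2 instead. A sketch of re-enabling the client-side logs that init() discards by default:

```go
package main

import (
	"os"

	"github.com/coreos/etcd/clientv3"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Route client/balancer logs (discarded by default in init above) to stderr.
	// NewLoggerV2 takes separate info, warning, and error writers.
	clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
}
```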
|
||||
|
59 vendor/github.com/coreos/etcd/clientv3/maintenance.go (generated, vendored)
@ -18,8 +18,8 @@ import (
|
||||
"io"
|
||||
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -36,7 +36,7 @@ type Maintenance interface {
|
||||
// AlarmDisarm disarms a given alarm.
|
||||
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
|
||||
|
||||
// Defragment defragments storage backend of the etcd member with given endpoint.
|
||||
// Defragment releases wasted space from internal fragmentation on a given etcd member.
|
||||
// Defragment is only needed when deleting a large number of keys and want to reclaim
|
||||
// the resources.
|
||||
// Defragment is an expensive operation. User should avoid defragmenting multiple members
|
||||
@ -48,17 +48,36 @@ type Maintenance interface {
|
||||
// Status gets the status of the endpoint.
|
||||
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
|
||||
|
||||
// Snapshot provides a reader for a snapshot of a backend.
|
||||
// Snapshot provides a reader for a point-in-time snapshot of etcd.
|
||||
Snapshot(ctx context.Context) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
type maintenance struct {
|
||||
c *Client
|
||||
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
|
||||
remote pb.MaintenanceClient
|
||||
}
|
||||
|
||||
func NewMaintenance(c *Client) Maintenance {
|
||||
return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)}
|
||||
return &maintenance{
|
||||
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
|
||||
conn, err := c.dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cancel := func() { conn.Close() }
|
||||
return RetryMaintenanceClient(c, conn), cancel, nil
|
||||
},
|
||||
remote: RetryMaintenanceClient(c, c.conn),
|
||||
}
|
||||
}
|
||||
|
||||
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
|
||||
return &maintenance{
|
||||
dial: func(string) (pb.MaintenanceClient, func(), error) {
|
||||
return remote, func() {}, nil
|
||||
},
|
||||
remote: remote,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
|
||||
@ -67,15 +86,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
|
||||
MemberID: 0, // all
|
||||
Alarm: pb.AlarmType_NONE, // all
|
||||
}
|
||||
for {
|
||||
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
return (*AlarmResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
resp, err := m.remote.Alarm(ctx, req)
|
||||
if err == nil {
|
||||
return (*AlarmResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
|
||||
@ -101,7 +116,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
|
||||
resp, err := m.remote.Alarm(ctx, req)
|
||||
if err == nil {
|
||||
return (*AlarmResponse)(resp), nil
|
||||
}
|
||||
@ -109,13 +124,12 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
|
||||
}
|
||||
|
||||
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
|
||||
conn, err := m.c.Dial(endpoint)
|
||||
remote, cancel, err := m.dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
defer conn.Close()
|
||||
remote := pb.NewMaintenanceClient(conn)
|
||||
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
|
||||
defer cancel()
|
||||
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{})
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
@ -123,13 +137,12 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
|
||||
}
|
||||
|
||||
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
|
||||
conn, err := m.c.Dial(endpoint)
|
||||
remote, cancel, err := m.dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
defer conn.Close()
|
||||
remote := pb.NewMaintenanceClient(conn)
|
||||
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
|
||||
defer cancel()
|
||||
resp, err := remote.Status(ctx, &pb.StatusRequest{})
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
@ -137,7 +150,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
|
||||
}
|
||||
|
||||
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
|
||||
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
|
||||
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{})
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
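Status and Defragment above now dial the named endpoint through the retry-wrapped maintenance client instead of building a raw pb.MaintenanceClient. Caller-visible behavior is the same; a sketch of polling every configured endpoint, with the endpoint list illustrative.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Status dials each endpoint individually, so this reports per-member health.
	for _, ep := range cli.Endpoints() {
		st, err := cli.Status(context.Background(), ep)
		if err != nil {
			fmt.Printf("%s: unreachable: %v\n", ep, err)
			continue
		}
		fmt.Printf("%s: version=%s dbSize=%d raftTerm=%d\n", ep, st.Version, st.DbSize, st.RaftTerm)
	}
}
```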
|
||||
|
34 vendor/github.com/coreos/etcd/clientv3/namespace/BUILD (generated, vendored, new file)
@ -0,0 +1,34 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"kv.go",
|
||||
"lease.go",
|
||||
"util.go",
|
||||
"watch.go",
|
||||
],
|
||||
importpath = "github.com/coreos/etcd/clientv3/namespace",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library",
|
||||
"//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
43 vendor/github.com/coreos/etcd/clientv3/namespace/doc.go (generated, vendored, new file)
@ -0,0 +1,43 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package namespace is a clientv3 wrapper that translates all keys to begin
|
||||
// with a given prefix.
|
||||
//
|
||||
// First, create a client:
|
||||
//
|
||||
// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
|
||||
// if err != nil {
|
||||
// // handle error!
|
||||
// }
|
||||
//
|
||||
// Next, override the client interfaces:
|
||||
//
|
||||
// unprefixedKV := cli.KV
|
||||
// cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
|
||||
// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
|
||||
// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
|
||||
//
|
||||
// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/":
|
||||
//
|
||||
// cli.Put(context.TODO(), "abc", "123")
|
||||
// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc")
|
||||
// fmt.Printf("%s\n", resp.Kvs[0].Value)
|
||||
// // Output: 123
|
||||
// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456")
|
||||
// resp, _ = cli.Get("abc")
|
||||
// fmt.Printf("%s\n", resp.Kvs[0].Value)
|
||||
// // Output: 456
|
||||
//
|
||||
package namespace
|
189 vendor/github.com/coreos/etcd/clientv3/namespace/kv.go (generated, vendored, new file)
@ -0,0 +1,189 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package namespace
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type kvPrefix struct {
|
||||
clientv3.KV
|
||||
pfx string
|
||||
}
|
||||
|
||||
// NewKV wraps a KV instance so that all requests
|
||||
// are prefixed with a given string.
|
||||
func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
|
||||
return &kvPrefix{kv, prefix}
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
|
||||
if len(key) == 0 {
|
||||
return nil, rpctypes.ErrEmptyKey
|
||||
}
|
||||
op := kv.prefixOp(clientv3.OpPut(key, val, opts...))
|
||||
r, err := kv.KV.Do(ctx, op)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
put := r.Put()
|
||||
kv.unprefixPutResponse(put)
|
||||
return put, nil
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
|
||||
if len(key) == 0 {
|
||||
return nil, rpctypes.ErrEmptyKey
|
||||
}
|
||||
r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
get := r.Get()
|
||||
kv.unprefixGetResponse(get)
|
||||
return get, nil
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {
|
||||
if len(key) == 0 {
|
||||
return nil, rpctypes.ErrEmptyKey
|
||||
}
|
||||
r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
del := r.Del()
|
||||
kv.unprefixDeleteResponse(del)
|
||||
return del, nil
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
|
||||
if len(op.KeyBytes()) == 0 {
|
||||
return clientv3.OpResponse{}, rpctypes.ErrEmptyKey
|
||||
}
|
||||
r, err := kv.KV.Do(ctx, kv.prefixOp(op))
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
switch {
|
||||
case r.Get() != nil:
|
||||
kv.unprefixGetResponse(r.Get())
|
||||
case r.Put() != nil:
|
||||
kv.unprefixPutResponse(r.Put())
|
||||
case r.Del() != nil:
|
||||
kv.unprefixDeleteResponse(r.Del())
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
type txnPrefix struct {
|
||||
clientv3.Txn
|
||||
kv *kvPrefix
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn {
|
||||
return &txnPrefix{kv.KV.Txn(ctx), kv}
|
||||
}
|
||||
|
||||
func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn {
|
||||
newCmps := make([]clientv3.Cmp, len(cs))
|
||||
for i := range cs {
|
||||
newCmps[i] = cs[i]
|
||||
pfxKey, _ := txn.kv.prefixInterval(cs[i].KeyBytes(), nil)
|
||||
newCmps[i].WithKeyBytes(pfxKey)
|
||||
}
|
||||
txn.Txn = txn.Txn.If(newCmps...)
|
||||
return txn
|
||||
}
|
||||
|
||||
func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn {
|
||||
newOps := make([]clientv3.Op, len(ops))
|
||||
for i := range ops {
|
||||
newOps[i] = txn.kv.prefixOp(ops[i])
|
||||
}
|
||||
txn.Txn = txn.Txn.Then(newOps...)
|
||||
return txn
|
||||
}
|
||||
|
||||
func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn {
|
||||
newOps := make([]clientv3.Op, len(ops))
|
||||
for i := range ops {
|
||||
newOps[i] = txn.kv.prefixOp(ops[i])
|
||||
}
|
||||
txn.Txn = txn.Txn.Else(newOps...)
|
||||
return txn
|
||||
}
|
||||
|
||||
func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) {
|
||||
resp, err := txn.Txn.Commit()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txn.kv.unprefixTxnResponse(resp)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op {
|
||||
begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
|
||||
op.WithKeyBytes(begin)
|
||||
op.WithRangeBytes(end)
|
||||
return op
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) {
|
||||
for i := range resp.Kvs {
|
||||
resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):]
|
||||
}
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) {
|
||||
if resp.PrevKv != nil {
|
||||
resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):]
|
||||
}
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) {
|
||||
for i := range resp.PrevKvs {
|
||||
resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):]
|
||||
}
|
||||
}
|
||||
|
||||
func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) {
|
||||
for _, r := range resp.Responses {
|
||||
switch tv := r.Response.(type) {
|
||||
case *pb.ResponseOp_ResponseRange:
|
||||
if tv.ResponseRange != nil {
|
||||
kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange))
|
||||
}
|
||||
case *pb.ResponseOp_ResponsePut:
|
||||
if tv.ResponsePut != nil {
|
||||
kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut))
|
||||
}
|
||||
case *pb.ResponseOp_ResponseDeleteRange:
|
||||
if tv.ResponseDeleteRange != nil {
|
||||
kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange))
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
|
||||
return prefixInterval(p.pfx, key, end)
|
||||
}
|
58 vendor/github.com/coreos/etcd/clientv3/namespace/lease.go generated vendored Normal file
@@ -0,0 +1,58 @@
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package namespace
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type leasePrefix struct {
|
||||
clientv3.Lease
|
||||
pfx []byte
|
||||
}
|
||||
|
||||
// NewLease wraps a Lease interface to filter for only keys with a prefix
|
||||
// and remove that prefix when fetching attached keys through TimeToLive.
|
||||
func NewLease(l clientv3.Lease, prefix string) clientv3.Lease {
|
||||
return &leasePrefix{l, []byte(prefix)}
|
||||
}
|
||||
|
||||
func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
|
||||
resp, err := l.Lease.TimeToLive(ctx, id, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(resp.Keys) > 0 {
|
||||
var outKeys [][]byte
|
||||
for i := range resp.Keys {
|
||||
if len(resp.Keys[i]) < len(l.pfx) {
|
||||
// too short
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
|
||||
// doesn't match prefix
|
||||
continue
|
||||
}
|
||||
// strip prefix
|
||||
outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
|
||||
}
|
||||
resp.Keys = outKeys
|
||||
}
|
||||
return resp, nil
|
||||
}
|
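The TimeToLive filtering above is easiest to see from the caller's side. The program below is a hedged sketch, not vendored code: the endpoint, the "my-prefix/" namespace, and the "job/1" key are placeholder assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/namespace"
)

func main() {
	// Placeholder endpoint and prefix.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
	cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")

	lease, err := cli.Grant(context.TODO(), 30)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := cli.Put(context.TODO(), "job/1", "pending", clientv3.WithLease(lease.ID)); err != nil {
		log.Fatal(err)
	}

	// WithAttachedKeys asks the server for the keys bound to the lease;
	// the leasePrefix wrapper above strips "my-prefix/" before returning them.
	ttl, err := cli.TimeToLive(context.TODO(), lease.ID, clientv3.WithAttachedKeys())
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range ttl.Keys {
		fmt.Printf("attached key: %s\n", k) // "job/1", not "my-prefix/job/1"
	}
}
```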
42 vendor/github.com/coreos/etcd/clientv3/namespace/util.go generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package namespace

func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) {
	pfxKey = make([]byte, len(pfx)+len(key))
	copy(pfxKey[copy(pfxKey, pfx):], key)

	if len(end) == 1 && end[0] == 0 {
		// the edge of the keyspace
		pfxEnd = make([]byte, len(pfx))
		copy(pfxEnd, pfx)
		ok := false
		for i := len(pfxEnd) - 1; i >= 0; i-- {
			if pfxEnd[i]++; pfxEnd[i] != 0 {
				ok = true
				break
			}
		}
		if !ok {
			// 0xff..ff => 0x00
			pfxEnd = []byte{0}
		}
	} else if len(end) >= 1 {
		pfxEnd = make([]byte, len(pfx)+len(end))
		copy(pfxEnd[copy(pfxEnd, pfx):], end)
	}

	return pfxKey, pfxEnd
}
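To make the end-of-keyspace branch concrete, the standalone program below inlines a copy of that branch. prefixInterval itself is unexported, so prefixRangeEnd here is a hypothetical mirror written only for illustration.

```go
package main

import "fmt"

// prefixRangeEnd mirrors the "edge of the keyspace" branch above: when a
// request covers everything from some key to the end of the keyspace
// (end == []byte{0}), the namespaced range must stop at the first key that
// no longer carries the prefix, i.e. the prefix with its last byte bumped.
func prefixRangeEnd(pfx string) []byte {
	end := []byte(pfx)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i]++; end[i] != 0 {
			return end
		}
	}
	// Prefix was all 0xff bytes: fall back to the end-of-keyspace marker.
	return []byte{0}
}

func main() {
	// "my-prefix/" ends in '/' (0x2f), so the range end becomes "my-prefix0".
	fmt.Printf("%q\n", prefixRangeEnd("my-prefix/"))
	// An all-0xff prefix wraps around to the keyspace end marker.
	fmt.Printf("%v\n", prefixRangeEnd("\xff\xff"))
}
```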
84 vendor/github.com/coreos/etcd/clientv3/namespace/watch.go generated vendored Normal file
@@ -0,0 +1,84 @@
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package namespace
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type watcherPrefix struct {
|
||||
clientv3.Watcher
|
||||
pfx string
|
||||
|
||||
wg sync.WaitGroup
|
||||
stopc chan struct{}
|
||||
stopOnce sync.Once
|
||||
}
|
||||
|
||||
// NewWatcher wraps a Watcher instance so that all Watch requests
|
||||
// are prefixed with a given string and all Watch responses have
|
||||
// the prefix removed.
|
||||
func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher {
|
||||
return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})}
|
||||
}
|
||||
|
||||
func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
|
||||
// since OpOption is opaque, determine range for prefixing through an OpGet
|
||||
op := clientv3.OpGet(key, opts...)
|
||||
end := op.RangeBytes()
|
||||
pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end)
|
||||
if pfxEnd != nil {
|
||||
opts = append(opts, clientv3.WithRange(string(pfxEnd)))
|
||||
}
|
||||
|
||||
wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...)
|
||||
|
||||
// translate watch events from prefixed to unprefixed
|
||||
pfxWch := make(chan clientv3.WatchResponse)
|
||||
w.wg.Add(1)
|
||||
go func() {
|
||||
defer func() {
|
||||
close(pfxWch)
|
||||
w.wg.Done()
|
||||
}()
|
||||
for wr := range wch {
|
||||
for i := range wr.Events {
|
||||
wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):]
|
||||
if wr.Events[i].PrevKv != nil {
|
||||
wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key
|
||||
}
|
||||
}
|
||||
select {
|
||||
case pfxWch <- wr:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-w.stopc:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return pfxWch
|
||||
}
|
||||
|
||||
func (w *watcherPrefix) Close() error {
|
||||
err := w.Watcher.Close()
|
||||
w.stopOnce.Do(func() { close(w.stopc) })
|
||||
w.wg.Wait()
|
||||
return err
|
||||
}
|
32 vendor/github.com/coreos/etcd/clientv3/naming/BUILD generated vendored Normal file
@@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"grpc.go",
|
||||
],
|
||||
importpath = "github.com/coreos/etcd/clientv3/naming",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/coreos/etcd/clientv3:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/codes:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/naming:go_default_library",
|
||||
"//vendor/google.golang.org/grpc/status:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
56 vendor/github.com/coreos/etcd/clientv3/naming/doc.go generated vendored Normal file
@@ -0,0 +1,56 @@
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services.
|
||||
//
|
||||
// To use, first import the packages:
|
||||
//
|
||||
// import (
|
||||
// "github.com/coreos/etcd/clientv3"
|
||||
// etcdnaming "github.com/coreos/etcd/clientv3/naming"
|
||||
//
|
||||
// "google.golang.org/grpc"
|
||||
// "google.golang.org/grpc/naming"
|
||||
// )
|
||||
//
|
||||
// First, register new endpoint addresses for a service:
|
||||
//
|
||||
// func etcdAdd(c *clientv3.Client, service, addr string) error {
|
||||
// r := &etcdnaming.GRPCResolver{Client: c}
|
||||
// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr})
|
||||
// }
|
||||
//
|
||||
// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer:
|
||||
//
|
||||
// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) {
|
||||
// r := &etcdnaming.GRPCResolver{Client: c}
|
||||
// b := grpc.RoundRobin(r)
|
||||
// return grpc.Dial(service, grpc.WithBalancer(b))
|
||||
// }
|
||||
//
|
||||
// Optionally, force delete an endpoint:
|
||||
//
|
||||
// func etcdDelete(c *clientv3, service, addr string) error {
|
||||
// r := &etcdnaming.GRPCResolver{Client: c}
|
||||
// return r.Update(c.Ctx(), "my-service", naming.Update{Op: naming.Delete, Addr: "1.2.3.4"})
|
||||
// }
|
||||
//
|
||||
// Or register an expiring endpoint with a lease:
|
||||
//
|
||||
// func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error {
|
||||
// r := &etcdnaming.GRPCResolver{Client: c}
|
||||
// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
|
||||
// }
|
||||
//
|
||||
package naming
|
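Putting the doc comment above together, a caller might register an address and dial through the resolver roughly as follows. This is an illustrative sketch rather than vendored code: the endpoint, the "my-service" target, and the backend address are placeholders, and it uses the grpc 1.7-era naming/Balancer API that this package targets.

```go
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	etcdnaming "github.com/coreos/etcd/clientv3/naming"

	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	r := &etcdnaming.GRPCResolver{Client: cli}

	// Advertise one backend under the "my-service" target (placeholder names).
	if err := r.Update(cli.Ctx(), "my-service", naming.Update{Op: naming.Add, Addr: "1.2.3.4:8080"}); err != nil {
		log.Fatal(err)
	}

	// Dial through the etcd-backed resolver with the round-robin balancer.
	conn, err := grpc.Dial("my-service", grpc.WithBalancer(grpc.RoundRobin(r)), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```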
132 vendor/github.com/coreos/etcd/clientv3/naming/grpc.go generated vendored Normal file
@@ -0,0 +1,132 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package naming
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
etcd "github.com/coreos/etcd/clientv3"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/naming"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var ErrWatcherClosed = fmt.Errorf("naming: watch closed")
|
||||
|
||||
// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes.
|
||||
type GRPCResolver struct {
|
||||
// Client is an initialized etcd client.
|
||||
Client *etcd.Client
|
||||
}
|
||||
|
||||
func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) {
|
||||
switch nm.Op {
|
||||
case naming.Add:
|
||||
var v []byte
|
||||
if v, err = json.Marshal(nm); err != nil {
|
||||
return status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
_, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...)
|
||||
case naming.Delete:
|
||||
_, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...)
|
||||
default:
|
||||
return status.Error(codes.InvalidArgument, "naming: bad naming op")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
w := &gRPCWatcher{c: gr.Client, target: target + "/", ctx: ctx, cancel: cancel}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
type gRPCWatcher struct {
|
||||
c *etcd.Client
|
||||
target string
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wch etcd.WatchChan
|
||||
err error
|
||||
}
|
||||
|
||||
// Next gets the next set of updates from the etcd resolver.
|
||||
// Calls to Next should be serialized; concurrent calls are not safe since
|
||||
// there is no way to reconcile the update ordering.
|
||||
func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
|
||||
if gw.wch == nil {
|
||||
// first Next() returns all addresses
|
||||
return gw.firstNext()
|
||||
}
|
||||
if gw.err != nil {
|
||||
return nil, gw.err
|
||||
}
|
||||
|
||||
// process new events on target/*
|
||||
wr, ok := <-gw.wch
|
||||
if !ok {
|
||||
gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error())
|
||||
return nil, gw.err
|
||||
}
|
||||
if gw.err = wr.Err(); gw.err != nil {
|
||||
return nil, gw.err
|
||||
}
|
||||
|
||||
updates := make([]*naming.Update, 0, len(wr.Events))
|
||||
for _, e := range wr.Events {
|
||||
var jupdate naming.Update
|
||||
var err error
|
||||
switch e.Type {
|
||||
case etcd.EventTypePut:
|
||||
err = json.Unmarshal(e.Kv.Value, &jupdate)
|
||||
jupdate.Op = naming.Add
|
||||
case etcd.EventTypeDelete:
|
||||
err = json.Unmarshal(e.PrevKv.Value, &jupdate)
|
||||
jupdate.Op = naming.Delete
|
||||
}
|
||||
if err == nil {
|
||||
updates = append(updates, &jupdate)
|
||||
}
|
||||
}
|
||||
return updates, nil
|
||||
}
|
||||
|
||||
func (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) {
|
||||
// Use serialized request so resolution still works if the target etcd
|
||||
// server is partitioned away from the quorum.
|
||||
resp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable())
|
||||
if gw.err = err; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
updates := make([]*naming.Update, 0, len(resp.Kvs))
|
||||
for _, kv := range resp.Kvs {
|
||||
var jupdate naming.Update
|
||||
if err := json.Unmarshal(kv.Value, &jupdate); err != nil {
|
||||
continue
|
||||
}
|
||||
updates = append(updates, &jupdate)
|
||||
}
|
||||
|
||||
opts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()}
|
||||
gw.wch = gw.c.Watch(gw.ctx, gw.target, opts...)
|
||||
return updates, nil
|
||||
}
|
||||
|
||||
func (gw *gRPCWatcher) Close() { gw.cancel() }
|
133 vendor/github.com/coreos/etcd/clientv3/op.go generated vendored
@@ -23,6 +23,7 @@ const (
tRange opType = iota + 1
|
||||
tPut
|
||||
tDeleteRange
|
||||
tTxn
|
||||
)
|
||||
|
||||
var (
|
||||
@ -52,6 +53,10 @@ type Op struct {
|
||||
// for watch, put, delete
|
||||
prevKV bool
|
||||
|
||||
// for put
|
||||
ignoreValue bool
|
||||
ignoreLease bool
|
||||
|
||||
// progressNotify is for progress updates.
|
||||
progressNotify bool
|
||||
// createdNotify is for created event
|
||||
@ -63,8 +68,69 @@ type Op struct {
|
||||
// for put
|
||||
val []byte
|
||||
leaseID LeaseID
|
||||
|
||||
// txn
|
||||
cmps []Cmp
|
||||
thenOps []Op
|
||||
elseOps []Op
|
||||
}
|
||||
|
||||
// accessors / mutators
|
||||
|
||||
func (op Op) IsTxn() bool { return op.t == tTxn }
|
||||
func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
|
||||
|
||||
// KeyBytes returns the byte slice holding the Op's key.
|
||||
func (op Op) KeyBytes() []byte { return op.key }
|
||||
|
||||
// WithKeyBytes sets the byte slice for the Op's key.
|
||||
func (op *Op) WithKeyBytes(key []byte) { op.key = key }
|
||||
|
||||
// RangeBytes returns the byte slice holding with the Op's range end, if any.
|
||||
func (op Op) RangeBytes() []byte { return op.end }
|
||||
|
||||
// Rev returns the requested revision, if any.
|
||||
func (op Op) Rev() int64 { return op.rev }
|
||||
|
||||
// IsPut returns true iff the operation is a Put.
|
||||
func (op Op) IsPut() bool { return op.t == tPut }
|
||||
|
||||
// IsGet returns true iff the operation is a Get.
|
||||
func (op Op) IsGet() bool { return op.t == tRange }
|
||||
|
||||
// IsDelete returns true iff the operation is a Delete.
|
||||
func (op Op) IsDelete() bool { return op.t == tDeleteRange }
|
||||
|
||||
// IsSerializable returns true if the serializable field is true.
|
||||
func (op Op) IsSerializable() bool { return op.serializable == true }
|
||||
|
||||
// IsKeysOnly returns whether keysOnly is set.
|
||||
func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
|
||||
|
||||
// IsCountOnly returns whether countOnly is set.
|
||||
func (op Op) IsCountOnly() bool { return op.countOnly == true }
|
||||
|
||||
// MinModRev returns the operation's minimum modify revision.
|
||||
func (op Op) MinModRev() int64 { return op.minModRev }
|
||||
|
||||
// MaxModRev returns the operation's maximum modify revision.
|
||||
func (op Op) MaxModRev() int64 { return op.maxModRev }
|
||||
|
||||
// MinCreateRev returns the operation's minimum create revision.
|
||||
func (op Op) MinCreateRev() int64 { return op.minCreateRev }
|
||||
|
||||
// MaxCreateRev returns the operation's maximum create revision.
|
||||
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
|
||||
|
||||
// WithRangeBytes sets the byte slice for the Op's range end.
|
||||
func (op *Op) WithRangeBytes(end []byte) { op.end = end }
|
||||
|
||||
// ValueBytes returns the byte slice holding the Op's value, if any.
|
||||
func (op Op) ValueBytes() []byte { return op.val }
|
||||
|
||||
// WithValueBytes sets the byte slice for the Op's value.
|
||||
func (op *Op) WithValueBytes(v []byte) { op.val = v }
|
||||
|
||||
func (op Op) toRangeRequest() *pb.RangeRequest {
|
||||
if op.t != tRange {
|
||||
panic("op.t != tRange")
|
||||
@ -89,12 +155,28 @@ func (op Op) toRangeRequest() *pb.RangeRequest {
|
||||
return r
|
||||
}
|
||||
|
||||
func (op Op) toTxnRequest() *pb.TxnRequest {
|
||||
thenOps := make([]*pb.RequestOp, len(op.thenOps))
|
||||
for i, tOp := range op.thenOps {
|
||||
thenOps[i] = tOp.toRequestOp()
|
||||
}
|
||||
elseOps := make([]*pb.RequestOp, len(op.elseOps))
|
||||
for i, eOp := range op.elseOps {
|
||||
elseOps[i] = eOp.toRequestOp()
|
||||
}
|
||||
cmps := make([]*pb.Compare, len(op.cmps))
|
||||
for i := range op.cmps {
|
||||
cmps[i] = (*pb.Compare)(&op.cmps[i])
|
||||
}
|
||||
return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
|
||||
}
|
||||
|
||||
func (op Op) toRequestOp() *pb.RequestOp {
|
||||
switch op.t {
|
||||
case tRange:
|
||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
|
||||
case tPut:
|
||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
|
||||
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
|
||||
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
|
||||
case tDeleteRange:
|
||||
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
|
||||
@ -105,6 +187,19 @@ func (op Op) toRequestOp() *pb.RequestOp {
|
||||
}
|
||||
|
||||
func (op Op) isWrite() bool {
|
||||
if op.t == tTxn {
|
||||
for _, tOp := range op.thenOps {
|
||||
if tOp.isWrite() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, tOp := range op.elseOps {
|
||||
if tOp.isWrite() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return op.t != tRange
|
||||
}
|
||||
|
||||
@ -170,6 +265,10 @@ func OpPut(key, val string, opts ...OpOption) Op {
|
||||
return ret
|
||||
}
|
||||
|
||||
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
|
||||
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
|
||||
}
|
||||
|
||||
func opWatch(key string, opts ...OpOption) Op {
|
||||
ret := Op{t: tRange, key: []byte(key)}
|
||||
ret.applyOpts(opts)
|
||||
@ -207,6 +306,7 @@ func WithLease(leaseID LeaseID) OpOption {
|
||||
}
|
||||
|
||||
// WithLimit limits the number of results to return from 'Get' request.
|
||||
// If WithLimit is given a 0 limit, it is treated as no limit.
|
||||
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
|
||||
|
||||
// WithRev specifies the store revision for 'Get' request.
|
||||
@ -222,9 +322,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
|
||||
if target == SortByKey && order == SortAscend {
|
||||
// If order != SortNone, server fetches the entire key-space,
|
||||
// and then applies the sort and limit, if provided.
|
||||
// Since current mvcc.Range implementation returns results
|
||||
// sorted by keys in lexicographically ascending order,
|
||||
// client should ignore SortOrder if the target is SortByKey.
|
||||
// Since by default the server returns results sorted by keys
|
||||
// in lexicographically ascending order, the client should ignore
|
||||
// SortOrder if the target is SortByKey.
|
||||
order = SortNone
|
||||
}
|
||||
op.sort = &SortOption{target, order}
|
||||
@ -257,6 +357,10 @@ func getPrefix(key []byte) []byte {
|
||||
// can return 'foo1', 'foo2', and so on.
|
||||
func WithPrefix() OpOption {
|
||||
return func(op *Op) {
|
||||
if len(op.key) == 0 {
|
||||
op.key, op.end = []byte{0}, []byte{0}
|
||||
return
|
||||
}
|
||||
op.end = getPrefix(op.key)
|
||||
}
|
||||
}
|
||||
@ -360,6 +464,24 @@ func WithPrevKV() OpOption {
|
||||
}
|
||||
}
|
||||
|
||||
// WithIgnoreValue updates the key using its current value.
|
||||
// This option can not be combined with non-empty values.
|
||||
// Returns an error if the key does not exist.
|
||||
func WithIgnoreValue() OpOption {
|
||||
return func(op *Op) {
|
||||
op.ignoreValue = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithIgnoreLease updates the key using its current lease.
|
||||
// This option can not be combined with WithLease.
|
||||
// Returns an error if the key does not exist.
|
||||
func WithIgnoreLease() OpOption {
|
||||
return func(op *Op) {
|
||||
op.ignoreLease = true
|
||||
}
|
||||
}
|
||||
|
||||
// LeaseOp represents an Operation that lease can execute.
|
||||
type LeaseOp struct {
|
||||
id LeaseID
|
||||
@ -377,8 +499,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) {
|
||||
}
|
||||
}
|
||||
|
||||
// WithAttachedKeys requests lease timetolive API to return
|
||||
// attached keys of given lease ID.
|
||||
// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
|
||||
func WithAttachedKeys() LeaseOption {
|
||||
return func(op *LeaseOp) { op.attachedKeys = true }
|
||||
}
|
||||
|
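The op.go hunk above introduces OpTxn along with the WithIgnoreValue and WithIgnoreLease options. The program below is a hedged sketch of how a caller might exercise them: the endpoint and key names are placeholders, it assumes the "config" key already exists, and it assumes the client's Do path dispatches the new txn op as the rest of this PR's client changes provide.

```go
package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
)

// refreshConfig demonstrates the options added in this hunk; "config" and
// its values are placeholders.
func refreshConfig(cli *clientv3.Client) error {
	ctx := context.TODO()

	// Re-put "config" without resending its value (the server rejects this
	// if the key is absent, per the WithIgnoreValue contract above).
	if _, err := cli.Put(ctx, "config", "", clientv3.WithIgnoreValue()); err != nil {
		return err
	}

	// A nested transaction built with the new OpTxn constructor and issued
	// through Do (assuming Do dispatches txn ops in this client version).
	txnOp := clientv3.OpTxn(
		[]clientv3.Cmp{clientv3.Compare(clientv3.Version("config"), ">", 0)},
		[]clientv3.Op{clientv3.OpGet("config")},
		[]clientv3.Op{clientv3.OpPut("config", "default")},
	)
	_, err := cli.Do(ctx, txnOp)
	return err
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	if err := refreshConfig(cli); err != nil {
		log.Fatal(err)
	}
}
```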
30 vendor/github.com/coreos/etcd/clientv3/ready_wait.go generated vendored Normal file
@@ -0,0 +1,30 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import "golang.org/x/net/context"

// TODO: remove this when "FailFast=false" is fixed.
// See https://github.com/grpc/grpc-go/issues/1532.
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
	select {
	case <-ready:
		return nil
	case <-rpcCtx.Done():
		return rpcCtx.Err()
	case <-clientCtx.Done():
		return clientCtx.Err()
	}
}
376 vendor/github.com/coreos/etcd/clientv3/retry.go generated vendored
@@ -17,135 +17,183 @@ package clientv3
import (
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type rpcFunc func(ctx context.Context) error
|
||||
type retryRpcFunc func(context.Context, rpcFunc) error
|
||||
type retryRPCFunc func(context.Context, rpcFunc) error
|
||||
type retryStopErrFunc func(error) bool
|
||||
|
||||
func (c *Client) newRetryWrapper() retryRpcFunc {
|
||||
func isRepeatableStopError(err error) bool {
|
||||
eErr := rpctypes.Error(err)
|
||||
// always stop retry on etcd errors
|
||||
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
|
||||
return true
|
||||
}
|
||||
// only retry if unavailable
|
||||
ev, _ := status.FromError(err)
|
||||
return ev.Code() != codes.Unavailable
|
||||
}
|
||||
|
||||
func isNonRepeatableStopError(err error) bool {
|
||||
ev, _ := status.FromError(err)
|
||||
if ev.Code() != codes.Unavailable {
|
||||
return true
|
||||
}
|
||||
desc := rpctypes.ErrorDesc(err)
|
||||
return desc != "there is no address available" && desc != "there is no connection available"
|
||||
}
|
||||
|
||||
func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||
for {
|
||||
if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
|
||||
return err
|
||||
}
|
||||
pinned := c.balancer.pinned()
|
||||
err := f(rpcCtx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
eErr := rpctypes.Error(err)
|
||||
// always stop retry on etcd errors
|
||||
if _, ok := eErr.(rpctypes.EtcdError); ok {
|
||||
return err
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
|
||||
}
|
||||
|
||||
// only retry if unavailable
|
||||
if grpc.Code(err) != codes.Unavailable {
|
||||
return err
|
||||
if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
|
||||
// mark this before endpoint switch is triggered
|
||||
c.balancer.hostPortError(pinned, err)
|
||||
c.balancer.next()
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-c.balancer.ConnectNotify():
|
||||
case <-rpcCtx.Done():
|
||||
return rpcCtx.Err()
|
||||
case <-c.ctx.Done():
|
||||
return c.ctx.Err()
|
||||
if isStop(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) newAuthRetryWrapper() retryRpcFunc {
|
||||
func (c *Client) newAuthRetryWrapper() retryRPCFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||
for {
|
||||
pinned := c.balancer.pinned()
|
||||
err := f(rpcCtx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
|
||||
}
|
||||
// always stop retry on etcd errors other than invalid auth token
|
||||
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
|
||||
gterr := c.getToken(rpcCtx)
|
||||
if gterr != nil {
|
||||
if logger.V(4) {
|
||||
logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
|
||||
}
|
||||
return err // return the original error for simplicity
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
|
||||
// RetryKVClient implements a KVClient.
|
||||
func RetryKVClient(c *Client) pb.KVClient {
|
||||
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
|
||||
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
|
||||
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
|
||||
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
|
||||
conn := pb.NewKVClient(c.conn)
|
||||
retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry}
|
||||
retryAuthWrapper := c.newAuthRetryWrapper()
|
||||
return &retryKVClient{
|
||||
&nonRepeatableKVClient{retryBasic, retryAuthWrapper},
|
||||
retryAuthWrapper}
|
||||
}
|
||||
|
||||
type retryKVClient struct {
|
||||
*retryWriteKVClient
|
||||
*nonRepeatableKVClient
|
||||
repeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
|
||||
err = rkv.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.Range(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type retryWriteKVClient struct {
|
||||
pb.KVClient
|
||||
retryf retryRpcFunc
|
||||
type nonRepeatableKVClient struct {
|
||||
kc pb.KVClient
|
||||
nonRepeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Put(rctx, in, opts...)
|
||||
func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.Put(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
|
||||
func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
|
||||
func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
// TODO: repeatableRetry if read-only txn
|
||||
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.Txn(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
|
||||
func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.kc.Compact(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type retryLeaseClient struct {
|
||||
pb.LeaseClient
|
||||
retryf retryRpcFunc
|
||||
lc pb.LeaseClient
|
||||
repeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
|
||||
// RetryLeaseClient implements a LeaseClient.
|
||||
func RetryLeaseClient(c *Client) pb.LeaseClient {
|
||||
retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
|
||||
return &retryLeaseClient{retry, c.retryAuthWrapper}
|
||||
retry := &retryLeaseClient{
|
||||
pb.NewLeaseClient(c.conn),
|
||||
c.newRetryWrapper(isRepeatableStopError),
|
||||
}
|
||||
return &retryLeaseClient{retry, c.newAuthRetryWrapper()}
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
|
||||
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
|
||||
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
@ -153,140 +201,270 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
|
||||
err = rlc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
|
||||
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
|
||||
err = rlc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
|
||||
return err
|
||||
})
|
||||
return stream, err
|
||||
}
|
||||
|
||||
type retryClusterClient struct {
|
||||
pb.ClusterClient
|
||||
retryf retryRpcFunc
|
||||
*nonRepeatableClusterClient
|
||||
repeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
|
||||
// RetryClusterClient implements a ClusterClient.
|
||||
func RetryClusterClient(c *Client) pb.ClusterClient {
|
||||
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
|
||||
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
|
||||
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
|
||||
cc := pb.NewClusterClient(c.conn)
|
||||
return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry}
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
|
||||
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
|
||||
err = rcc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.cc.MemberList(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
|
||||
type nonRepeatableClusterClient struct {
|
||||
cc pb.ClusterClient
|
||||
nonRepeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
|
||||
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
|
||||
err = rcc.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
|
||||
func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
|
||||
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
|
||||
err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// RetryMaintenanceClient implements a Maintenance.
|
||||
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
|
||||
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
|
||||
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
|
||||
mc := pb.NewMaintenanceClient(conn)
|
||||
return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry}
|
||||
}
|
||||
|
||||
type retryMaintenanceClient struct {
|
||||
*nonRepeatableMaintenanceClient
|
||||
repeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
|
||||
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.Alarm(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
|
||||
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.Status(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
|
||||
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.Hash(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
|
||||
err = rmc.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
stream, err = rmc.mc.Snapshot(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return stream, err
|
||||
}
|
||||
|
||||
type nonRepeatableMaintenanceClient struct {
|
||||
mc pb.MaintenanceClient
|
||||
nonRepeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
|
||||
err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rmc.mc.Defragment(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type retryAuthClient struct {
|
||||
pb.AuthClient
|
||||
retryf retryRpcFunc
|
||||
*nonRepeatableAuthClient
|
||||
repeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy.
|
||||
// RetryAuthClient implements a AuthClient.
|
||||
func RetryAuthClient(c *Client) pb.AuthClient {
|
||||
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
|
||||
repeatableRetry := c.newRetryWrapper(isRepeatableStopError)
|
||||
nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError)
|
||||
ac := pb.NewAuthClient(c.conn)
|
||||
return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry}
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
|
||||
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
|
||||
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.UserList(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
|
||||
func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
|
||||
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.UserGet(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
|
||||
func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
|
||||
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.RoleGet(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
|
||||
func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
|
||||
err = rac.repeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.RoleList(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
|
||||
type nonRepeatableAuthClient struct {
|
||||
ac pb.AuthClient
|
||||
nonRepeatableRetry retryRPCFunc
|
||||
}
|
||||
|
||||
func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.AuthEnable(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
|
||||
func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.AuthDisable(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
|
||||
func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.UserAdd(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
|
||||
func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.UserDelete(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
|
||||
func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
|
||||
func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
|
||||
err = rac.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
|
||||
func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.RoleAdd(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.RoleDelete(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
|
||||
err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error {
|
||||
resp, err = rac.ac.Authenticate(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
|
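The rewritten retry.go above splits RPCs into repeatable calls (safe to resend, such as Range or MemberList) and non-repeatable ones (mutations whose first attempt may already have been applied), each driven by a stop predicate. The standalone sketch below only illustrates that shape outside etcd; it is not the vendored implementation and omits the balancer/ConnectNotify backoff.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retry keeps invoking f until it succeeds, the context ends, or isStop
// decides the error is final. Repeatable (idempotent) RPCs would use a
// permissive predicate; non-repeatable ones stop on anything that may
// already have taken effect server-side.
func retry(ctx context.Context, isStop func(error) bool, f func(context.Context) error) error {
	for {
		err := f(ctx)
		if err == nil {
			return nil
		}
		if isStop(err) {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
	}
}

// stopUnlessUnavailable mirrors the spirit of isRepeatableStopError above:
// only transport-level Unavailable errors are worth another attempt.
func stopUnlessUnavailable(err error) bool {
	s, ok := status.FromError(err)
	return !ok || s.Code() != codes.Unavailable
}

func main() {
	attempts := 0
	err := retry(context.Background(), stopUnlessUnavailable, func(context.Context) error {
		attempts++
		if attempts < 3 {
			return status.Error(codes.Unavailable, "transient")
		}
		return errors.New("permanent")
	})
	fmt.Println(attempts, err) // 3 permanent
}
```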
30 vendor/github.com/coreos/etcd/clientv3/txn.go generated vendored
@@ -18,13 +18,13 @@ import (
"sync"
|
||||
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Txn is the interface that wraps mini-transactions.
|
||||
//
|
||||
// Tx.If(
|
||||
// Txn(context.TODO()).If(
|
||||
// Compare(Value(k1), ">", v1),
|
||||
// Compare(Version(k1), "=", 2)
|
||||
// ).Then(
|
||||
@ -49,8 +49,6 @@ type Txn interface {
|
||||
|
||||
// Commit tries to commit the transaction.
|
||||
Commit() (*TxnResponse, error)
|
||||
|
||||
// TODO: add a Do for shortcut the txn without any condition?
|
||||
}
|
||||
|
||||
type txn struct {
|
||||
@ -137,30 +135,14 @@ func (txn *txn) Else(ops ...Op) Txn {
|
||||
func (txn *txn) Commit() (*TxnResponse, error) {
|
||||
txn.mu.Lock()
|
||||
defer txn.mu.Unlock()
|
||||
for {
|
||||
resp, err := txn.commit()
|
||||
if err == nil {
|
||||
return resp, err
|
||||
}
|
||||
if isHaltErr(txn.ctx, err) {
|
||||
return nil, toErr(txn.ctx, err)
|
||||
}
|
||||
if txn.isWrite {
|
||||
return nil, toErr(txn.ctx, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (txn *txn) commit() (*TxnResponse, error) {
|
||||
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
|
||||
|
||||
var opts []grpc.CallOption
|
||||
if !txn.isWrite {
|
||||
opts = []grpc.CallOption{grpc.FailFast(false)}
|
||||
}
|
||||
resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
|
||||
var resp *pb.TxnResponse
|
||||
var err error
|
||||
resp, err = txn.kv.remote.Txn(txn.ctx, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, toErr(txn.ctx, err)
|
||||
}
|
||||
return (*TxnResponse)(resp), nil
|
||||
}
|
||||
|
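The txn.go hunk above drops the internal commit retry loop (retries now live in the retry wrappers) and corrects the If/Then/Else doc comment. The program below is a hedged usage sketch of that builder; the endpoint, "k1", and the values are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Placeholder endpoint and keys; the If/Then/Else shape matches the
	// doc comment in the hunk above.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.Value("k1"), ">", "v1")).
		Then(clientv3.OpPut("k1", "v1.1")).
		Else(clientv3.OpGet("k1")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("took Then branch:", resp.Succeeded)
}
```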
74 vendor/github.com/coreos/etcd/clientv3/watch.go generated vendored
@@ -22,8 +22,12 @@ import (
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -39,10 +43,9 @@ type WatchChan <-chan WatchResponse
|
||||
|
||||
type Watcher interface {
|
||||
// Watch watches on a key or prefix. The watched events will be returned
|
||||
// through the returned channel.
|
||||
// If the watch is slow or the required rev is compacted, the watch request
|
||||
// might be canceled from the server-side and the chan will be closed.
|
||||
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
|
||||
// through the returned channel. If revisions waiting to be sent over the
|
||||
// watch are compacted, then the watch will be canceled by the server, the
|
||||
// client will post a compacted error watch response, and the channel will close.
|
||||
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
|
||||
|
||||
// Close closes the watcher and cancels all watch requests.
|
||||
@ -65,6 +68,9 @@ type WatchResponse struct {
|
||||
Created bool
|
||||
|
||||
closeErr error
|
||||
|
||||
// cancelReason is a reason of canceling watch
|
||||
cancelReason string
|
||||
}
|
||||
|
||||
// IsCreate returns true if the event tells that the key is newly created.
|
||||
@ -85,6 +91,9 @@ func (wr *WatchResponse) Err() error {
|
||||
case wr.CompactRevision != 0:
|
||||
return v3rpc.ErrCompacted
|
||||
case wr.Canceled:
|
||||
if len(wr.cancelReason) != 0 {
|
||||
return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
|
||||
}
|
||||
return v3rpc.ErrFutureRev
|
||||
}
|
||||
return nil
|
||||
@ -128,7 +137,7 @@ type watchGrpcStream struct {
|
||||
respc chan *pb.WatchResponse
|
||||
// donec closes to broadcast shutdown
|
||||
donec chan struct{}
|
||||
// errc transmits errors from grpc Recv to the watch stream reconn logic
|
||||
// errc transmits errors from grpc Recv to the watch stream reconnect logic
|
||||
errc chan error
|
||||
// closingc gets the watcherStream of closing watchers
|
||||
closingc chan *watcherStream
|
||||
@ -207,16 +216,15 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
|
||||
owner: w,
|
||||
remote: w.remote,
|
||||
ctx: ctx,
|
||||
ctxKey: fmt.Sprintf("%v", inctx),
|
||||
ctxKey: streamKeyFromCtx(inctx),
|
||||
cancel: cancel,
|
||||
substreams: make(map[int64]*watcherStream),
|
||||
|
||||
respc: make(chan *pb.WatchResponse),
|
||||
reqc: make(chan *watchRequest),
|
||||
donec: make(chan struct{}),
|
||||
errc: make(chan error, 1),
|
||||
closingc: make(chan *watcherStream),
|
||||
resumec: make(chan struct{}),
|
||||
respc: make(chan *pb.WatchResponse),
|
||||
reqc: make(chan *watchRequest),
|
||||
donec: make(chan struct{}),
|
||||
errc: make(chan error, 1),
|
||||
closingc: make(chan *watcherStream),
|
||||
resumec: make(chan struct{}),
|
||||
}
|
||||
go wgs.run()
|
||||
return wgs
|
||||
@ -247,7 +255,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
|
||||
}
|
||||
|
||||
ok := false
|
||||
ctxKey := fmt.Sprintf("%v", ctx)
|
||||
ctxKey := streamKeyFromCtx(ctx)
|
||||
|
||||
// find or allocate appropriate grpc watch stream
|
||||
w.mu.Lock()
|
||||
@ -310,14 +318,14 @@ func (w *watcher) Close() (err error) {
|
||||
w.streams = nil
|
||||
w.mu.Unlock()
|
||||
for _, wgs := range streams {
|
||||
if werr := wgs.Close(); werr != nil {
|
||||
if werr := wgs.close(); werr != nil {
|
||||
err = werr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *watchGrpcStream) Close() (err error) {
|
||||
func (w *watchGrpcStream) close() (err error) {
|
||||
w.cancel()
|
||||
<-w.donec
|
||||
select {
|
||||
@ -428,7 +436,7 @@ func (w *watchGrpcStream) run() {
|
||||
initReq: *wreq,
|
||||
id: -1,
|
||||
outc: outc,
|
||||
// unbufffered so resumes won't cause repeat events
|
||||
// unbuffered so resumes won't cause repeat events
|
||||
recvc: make(chan *WatchResponse),
|
||||
}
|
||||
|
||||
@ -480,7 +488,7 @@ func (w *watchGrpcStream) run() {
|
||||
req := &pb.WatchRequest{RequestUnion: cr}
|
||||
wc.Send(req)
|
||||
}
|
||||
// watch client failed to recv; spawn another if possible
|
||||
// watch client failed on Recv; spawn another if possible
|
||||
case err := <-w.errc:
|
||||
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
|
||||
closeErr = err
|
||||
@ -520,10 +528,6 @@ func (w *watchGrpcStream) nextResume() *watcherStream {
|
||||
|
||||
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
|
||||
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
|
||||
ws, ok := w.substreams[pbresp.WatchId]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
events := make([]*Event, len(pbresp.Events))
|
||||
for i, ev := range pbresp.Events {
|
||||
events[i] = (*Event)(ev)
|
||||
@ -534,6 +538,11 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
|
||||
CompactRevision: pbresp.CompactRevision,
|
||||
Created: pbresp.Created,
|
||||
Canceled: pbresp.Canceled,
|
||||
cancelReason: pbresp.CancelReason,
|
||||
}
|
||||
ws, ok := w.substreams[pbresp.WatchId]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
select {
|
||||
case ws.recvc <- wr:
|
||||
@ -725,7 +734,11 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
|
||||
ws.closing = true
|
||||
close(ws.outc)
|
||||
ws.outc = nil
|
||||
go func() { w.closingc <- ws }()
|
||||
w.wg.Add(1)
|
||||
go func() {
|
||||
defer w.wg.Done()
|
||||
w.closingc <- ws
|
||||
}()
|
||||
case <-stopc:
|
||||
}
|
||||
}(w.resuming[i])
|
||||
@ -737,7 +750,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
|
||||
return donec
|
||||
}
|
||||
|
||||
// joinSubstream waits for all substream goroutines to complete
|
||||
// joinSubstreams waits for all substream goroutines to complete.
|
||||
func (w *watchGrpcStream) joinSubstreams() {
|
||||
for _, ws := range w.substreams {
|
||||
<-ws.donec
|
||||
@ -749,7 +762,9 @@ func (w *watchGrpcStream) joinSubstreams() {
|
||||
}
|
||||
}
|
||||
|
||||
// openWatchClient retries opening a watchclient until retryConnection fails
|
||||
// openWatchClient retries opening a watch client until success or halt.
|
||||
// manually retry in case "ws==nil && err==nil"
|
||||
// TODO: remove FailFast=false
|
||||
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
|
||||
for {
|
||||
select {
|
||||
@ -770,7 +785,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
|
||||
return ws, nil
|
||||
}
|
||||
|
||||
// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest)
|
||||
// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
|
||||
func (wr *watchRequest) toPB() *pb.WatchRequest {
|
||||
req := &pb.WatchCreateRequest{
|
||||
StartRevision: wr.rev,
|
||||
@ -783,3 +798,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
|
||||
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
|
||||
return &pb.WatchRequest{RequestUnion: cr}
|
||||
}
|
||||
|
||||
func streamKeyFromCtx(ctx context.Context) string {
|
||||
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
return fmt.Sprintf("%+v", md)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
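The updated Watcher doc comment above spells out the compaction behavior: if revisions pending delivery are compacted, the server cancels the watch, the client posts a compacted-error response, and the channel closes. A sketch of consuming a WatchChan with that case handled; the endpoint, key, and starting revision are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Watch from an old revision; if that revision has already been
	// compacted, the server cancels the watch and Err() reports it.
	wch := cli.Watch(context.Background(), "foo", clientv3.WithRev(1))
	for wr := range wch {
		if err := wr.Err(); err != nil {
			if err == rpctypes.ErrCompacted {
				log.Println("watch canceled: revision compacted; re-watch from a newer revision")
				return
			}
			log.Println("watch error:", err)
			return
		}
		for _, ev := range wr.Events {
			log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```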
18 vendor/github.com/coreos/etcd/compactor/compactor.go generated vendored
@ -30,7 +30,8 @@ var (
)

const (
	checkCompactionInterval = 5 * time.Minute
	checkCompactionInterval   = 5 * time.Minute
	executeCompactionInterval = time.Hour
)

type Compactable interface {
@ -41,6 +42,8 @@ type RevGetter interface {
	Rev() int64
}

// Periodic compacts the log by purging revisions older than
// the configured retention time. Compaction happens hourly.
type Periodic struct {
	clock        clockwork.Clock
	periodInHour int
@ -85,11 +88,12 @@ func (t *Periodic) Run() {
					continue
				}
			}
			if clock.Now().Sub(last) < time.Duration(t.periodInHour)*time.Hour {

			if clock.Now().Sub(last) < executeCompactionInterval {
				continue
			}

			rev := t.getRev(t.periodInHour)
			rev, remaining := t.getRev(t.periodInHour)
			if rev < 0 {
				continue
			}
@ -97,7 +101,7 @@ func (t *Periodic) Run() {
			plog.Noticef("Starting auto-compaction at revision %d", rev)
			_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
			if err == nil || err == mvcc.ErrCompacted {
				t.revs = make([]int64, 0)
				t.revs = remaining
				last = clock.Now()
				plog.Noticef("Finished auto-compaction at revision %d", rev)
			} else {
@ -124,10 +128,10 @@ func (t *Periodic) Resume() {
	t.paused = false
}

func (t *Periodic) getRev(h int) int64 {
func (t *Periodic) getRev(h int) (int64, []int64) {
	i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval)
	if i < 0 {
		return -1
		return -1, t.revs
	}
	return t.revs[i]
	return t.revs[i], t.revs[i+1:]
}
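In the hunks above, getRev now also returns the sampled revisions that must be retained, and Run keeps `t.revs = remaining` instead of resetting the slice, presumably so the retention window is not thrown away after each compaction. A standalone sketch of that slicing logic, using illustrative names rather than the etcd Periodic type:

```go
package main

import (
	"fmt"
	"time"
)

const checkCompactionInterval = 5 * time.Minute

// pickCompactRev mirrors the reshaped getRev: given revisions sampled every
// checkCompactionInterval and a retention window of h hours, it returns the
// revision to compact to and the samples that must still be kept.
// A negative revision means there is not yet enough history to compact.
func pickCompactRev(revs []int64, h int) (int64, []int64) {
	i := len(revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval)
	if i < 0 {
		return -1, revs
	}
	return revs[i], revs[i+1:]
}

func main() {
	// 13 samples is just over one hour of history at 5-minute intervals.
	revs := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
	rev, remaining := pickCompactRev(revs, 1)
	fmt.Println(rev, remaining) // 2 [3 4 5 6 7 8 9 10 11 12 13]
}
```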
Some files were not shown because too many files have changed in this diff.