Move deps from _workspace/ to vendor/

godep restore
pushd $GOPATH/src/github.com/appc/spec
git co master
popd
go get go4.org/errorutil
rm -rf Godeps
godep save ./...
git add vendor
git add -f $(git ls-files --other vendor/)
git co -- Godeps/LICENSES Godeps/.license_file_state Godeps/OWNERS
Author: Tim Hockin
Date: 2016-05-08 20:30:21 -07:00
Parent: 899f9b4e31
Commit: 3c0c5ed4e0
4400 changed files with 16739 additions and 376 deletions

vendor/github.com/coreos/etcd/clientv3/README.md generated vendored Normal file

@@ -0,0 +1,74 @@
# etcd/clientv3
[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
`etcd/clientv3` is the official Go etcd client for v3.
## Install
```bash
go get github.com/coreos/etcd/clientv3
```
## Get started
Create a client using `clientv3.New`:
```go
cli, err := clientv3.New(clientv3.Config{
	Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
	DialTimeout: 5 * time.Second,
})
if err != nil {
	// handle error!
}
defer cli.Close()
```
etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls, and `clientv3` uses
[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it;
if the client is not closed, the connection will leak goroutines. To specify a client request timeout,
pass `context.WithTimeout` to APIs:
```go
ctx, cancel := context.WithTimeout(context.Background(), timeout)
resp, err := kvc.Put(ctx, "sample_key", "sample_value")
cancel()
if err != nil {
	// handle error!
}
// use the response
```
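The `kvc` handle in the snippet above is not defined there; a minimal sketch of obtaining one from the client created earlier (assuming the `cli` variable from the first example) is:
```go
// kvc is a KV handle created from the client; NewKV is defined in kv.go below
kvc := clientv3.NewKV(cli)
```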
etcd uses Go's `vendor` directory to manage its external dependencies. If `clientv3` is imported
outside of etcd, simply copy `clientv3` into your `vendor` directory or use a tool like godep to
manage your own dependencies, as described in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
For more detail, please read the [Go vendor design](https://golang.org/s/go15vendor).
## Error Handling
The etcd client returns two types of errors:
1. context error: canceled or deadline exceeded.
2. gRPC error: see [v3rpc/error](https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go).
Here is example code for handling client errors:
```go
resp, err := kvc.Put(ctx, "", "")
if err != nil {
	if err == context.Canceled {
		// ctx is canceled by another routine
	} else if err == context.DeadlineExceeded {
		// ctx is attached with a deadline and it exceeded
	} else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
		// process (verr.Errors)
	} else {
		// bad cluster endpoints, which are not etcd servers
	}
}
```
## Examples
More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
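As a small additional illustration (an editor sketch, not part of the upstream README), putting and reading back a key with the `kvc` handle above might look like this, assuming `fmt`, `context`, and `time` are imported:
```go
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if _, err := kvc.Put(ctx, "sample_key", "sample_value"); err != nil {
	// handle error!
}
resp, err := kvc.Get(ctx, "sample_key")
if err != nil {
	// handle error!
}
for _, kv := range resp.Kvs {
	fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
}
```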

vendor/github.com/coreos/etcd/clientv3/auth.go generated vendored Normal file

@@ -0,0 +1,87 @@
// Copyright 2016 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
AuthEnableResponse pb.AuthEnableResponse
AuthUserAddResponse pb.AuthUserAddResponse
AuthUserDeleteResponse pb.AuthUserDeleteResponse
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
AuthRoleAddResponse pb.AuthRoleAddResponse
)
type Auth interface {
// AuthEnable enables auth of an etcd cluster.
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
// UserAdd adds a new user to an etcd cluster.
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
// UserDelete deletes a user from an etcd cluster.
UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
// UserChangePassword changes a password of a user.
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
// RoleAdd adds a new role to an etcd cluster.
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
}
type auth struct {
c *Client
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
}
func NewAuth(c *Client) Auth {
conn := c.ActiveConnection()
return &auth{
conn: c.ActiveConnection(),
remote: pb.NewAuthClient(conn),
c: c,
}
}
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
return (*AuthEnableResponse)(resp), err
}
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
return (*AuthUserAddResponse)(resp), err
}
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
return (*AuthUserDeleteResponse)(resp), err
}
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
return (*AuthUserChangePasswordResponse)(resp), err
}
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
return (*AuthRoleAddResponse)(resp), err
}

vendor/github.com/coreos/etcd/clientv3/client.go generated vendored Normal file

@@ -0,0 +1,243 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"errors"
"io/ioutil"
"log"
"net"
"net/url"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
)
// Client provides and manages an etcd v3 client session.
type Client struct {
Cluster
KV
Lease
Watcher
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportAuthenticator
mu sync.RWMutex // protects connection selection and error list
errors []error // errors passed to retryConnection
ctx context.Context
cancel context.CancelFunc
}
// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
if cfg.RetryDialer == nil {
cfg.RetryDialer = dialEndpointList
}
if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints
}
return newClient(&cfg)
}
// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
return New(Config{Endpoints: []string{url}})
}
// NewFromConfigFile creates a new etcdv3 client from a configuration file.
func NewFromConfigFile(path string) (*Client, error) {
cfg, err := configFromFile(path)
if err != nil {
return nil, err
}
return New(*cfg)
}
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.mu.Lock()
if c.cancel == nil {
c.mu.Unlock()
return nil
}
c.cancel()
c.cancel = nil
c.mu.Unlock()
c.Watcher.Close()
c.Lease.Close()
return c.conn.Close()
}
// Ctx is a context for "out of band" messages (e.g., for sending
// "clean up" message when another context is canceled). It is
// canceled on client Close().
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
// Errors returns all errors that have been observed since called last.
func (c *Client) Errors() (errs []error) {
c.mu.Lock()
defer c.mu.Unlock()
errs = c.errors
c.errors = nil
return errs
}
// Dial establishes a connection for a given endpoint using the client's config
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
opts := []grpc.DialOption{
grpc.WithBlock(),
grpc.WithTimeout(c.cfg.DialTimeout),
}
if c.creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*c.creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
proto := "tcp"
if url, uerr := url.Parse(endpoint); uerr == nil && url.Scheme == "unix" {
proto = "unix"
// strip unix:// prefix so certs work
endpoint = url.Host
}
f := func(a string, t time.Duration) (net.Conn, error) {
select {
case <-c.ctx.Done():
return nil, c.ctx.Err()
default:
}
return net.DialTimeout(proto, a, t)
}
opts = append(opts, grpc.WithDialer(f))
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{RetryDialer: dialEndpointList}
}
var creds *credentials.TransportAuthenticator
if cfg.TLS != nil {
c := credentials.NewTLS(cfg.TLS)
creds = &c
}
// use a temporary skeleton client to bootstrap first connection
ctx, cancel := context.WithCancel(context.TODO())
conn, err := cfg.RetryDialer(&Client{cfg: *cfg, creds: creds, ctx: ctx})
if err != nil {
return nil, err
}
client := &Client{
conn: conn,
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
}
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
client.Lease = NewLease(client)
client.Watcher = NewWatcher(client)
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
if cfg.Logger != nil {
logger.Set(cfg.Logger)
} else {
// disable client-side grpc logging by default
logger.Set(log.New(ioutil.Discard, "", 0))
}
return client, nil
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn {
c.mu.RLock()
defer c.mu.RUnlock()
return c.conn
}
// retryConnection establishes a new connection
func (c *Client) retryConnection(oldConn *grpc.ClientConn, err error) (*grpc.ClientConn, error) {
c.mu.Lock()
defer c.mu.Unlock()
if err != nil {
c.errors = append(c.errors, err)
}
if c.cancel == nil {
return nil, c.ctx.Err()
}
if oldConn != c.conn {
// conn has already been updated
return c.conn, nil
}
oldConn.Close()
if st, _ := oldConn.State(); st != grpc.Shutdown {
// wait for shutdown so grpc doesn't leak sleeping goroutines
oldConn.WaitForStateChange(c.ctx, st)
}
conn, dialErr := c.cfg.RetryDialer(c)
if dialErr != nil {
c.errors = append(c.errors, dialErr)
return nil, dialErr
}
c.conn = conn
return c.conn, nil
}
// dialEndpointList attempts to connect to each endpoint in order until a
// connection is established.
func dialEndpointList(c *Client) (*grpc.ClientConn, error) {
var err error
for _, ep := range c.Endpoints() {
conn, curErr := c.Dial(ep)
if curErr != nil {
err = curErr
} else {
return conn, nil
}
}
return nil, err
}
// isHalted returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHalted(ctx context.Context, err error) bool {
isRPCError := strings.HasPrefix(grpc.ErrorDesc(err), "etcdserver: ")
return isRPCError || ctx.Err() != nil
}

vendor/github.com/coreos/etcd/clientv3/cluster.go generated vendored Normal file

@@ -0,0 +1,170 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
Member pb.Member
MemberListResponse pb.MemberListResponse
MemberAddResponse pb.MemberAddResponse
MemberRemoveResponse pb.MemberRemoveResponse
MemberUpdateResponse pb.MemberUpdateResponse
)
type Cluster interface {
// MemberList lists the current cluster membership.
MemberList(ctx context.Context) (*MemberListResponse, error)
// MemberLeader returns the current leader member.
MemberLeader(ctx context.Context) (*Member, error)
// MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
// MemberRemove removes an existing member from the cluster.
MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
// MemberUpdate updates the peer addresses of the member.
MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
}
type cluster struct {
c *Client
mu sync.Mutex
conn *grpc.ClientConn // conn in-use
remote pb.ClusterClient
}
func NewCluster(c *Client) Cluster {
conn := c.ActiveConnection()
return &cluster{
c: c,
conn: conn,
remote: pb.NewClusterClient(conn),
}
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.getRemote().MemberAdd(ctx, r)
if err == nil {
return (*MemberAddResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
go c.switchRemote(err)
return nil, err
}
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.getRemote().MemberRemove(ctx, r)
if err == nil {
return (*MemberRemoveResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
go c.switchRemote(err)
return nil, err
}
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// it is safe to retry on update.
for {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.getRemote().MemberUpdate(ctx, r)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
err = c.switchRemote(err)
if err != nil {
return nil, err
}
}
}
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
for {
resp, err := c.getRemote().MemberList(ctx, &pb.MemberListRequest{})
if err == nil {
return (*MemberListResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
err = c.switchRemote(err)
if err != nil {
return nil, err
}
}
}
func (c *cluster) MemberLeader(ctx context.Context) (*Member, error) {
resp, err := c.MemberList(ctx)
if err != nil {
return nil, err
}
for _, m := range resp.Members {
if m.IsLeader {
return (*Member)(m), nil
}
}
return nil, nil
}
func (c *cluster) getRemote() pb.ClusterClient {
c.mu.Lock()
defer c.mu.Unlock()
return c.remote
}
func (c *cluster) switchRemote(prevErr error) error {
newConn, err := c.c.retryConnection(c.conn, prevErr)
if err != nil {
return err
}
c.mu.Lock()
defer c.mu.Unlock()
c.conn = newConn
c.remote = pb.NewClusterClient(c.conn)
return nil
}

vendor/github.com/coreos/etcd/clientv3/compare.go generated vendored Normal file

@@ -0,0 +1,91 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
type CompareTarget int
type CompareResult int
const (
CompareVersion CompareTarget = iota
CompareCreated
CompareModified
CompareValue
)
type Cmp pb.Compare
func Compare(cmp Cmp, result string, v interface{}) Cmp {
var r pb.Compare_CompareResult
switch result {
case "=":
r = pb.Compare_EQUAL
case ">":
r = pb.Compare_GREATER
case "<":
r = pb.Compare_LESS
default:
panic("Unknown result op")
}
cmp.Result = r
switch cmp.Target {
case pb.Compare_VALUE:
val, ok := v.(string)
if !ok {
panic("bad compare value")
}
cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
case pb.Compare_VERSION:
cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
case pb.Compare_CREATE:
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
case pb.Compare_MOD:
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
default:
panic("Unknown compare type")
}
return cmp
}
func Value(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
}
func Version(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
}
func CreateRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
}
func ModRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}
func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok {
return v
}
if v, ok := val.(int); ok {
return int64(v)
}
panic("bad value")
}

vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go generated vendored Normal file

@@ -0,0 +1,17 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package concurrency implements concurrency operations on top of
// etcd such as distributed locks, barriers, and elections.
package concurrency
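As a rough illustration of what this package provides (an editor sketch, not part of the vendored file; it assumes a `*clientv3.Client` named `cli` as created in the README above, and `lockedSection` is a hypothetical helper name):
```go
import (
	"golang.org/x/net/context"

	v3 "github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

// lockedSection acquires a distributed lock keyed under pfx, runs f, and releases the lock.
func lockedSection(cli *v3.Client, pfx string, f func() error) error {
	m := concurrency.NewMutex(cli, pfx)
	if err := m.Lock(context.TODO()); err != nil {
		return err
	}
	defer m.Unlock()
	return f()
}
```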

vendor/github.com/coreos/etcd/clientv3/concurrency/election.go generated vendored Normal file

@@ -0,0 +1,183 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"errors"
v3 "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/storage/storagepb"
"golang.org/x/net/context"
)
var (
ErrElectionNotLeader = errors.New("election: not leader")
ErrElectionNoLeader = errors.New("election: no leader")
)
type Election struct {
client *v3.Client
keyPrefix string
leaderKey string
leaderRev int64
leaderSession *Session
}
// NewElection returns a new election on a given key prefix.
func NewElection(client *v3.Client, pfx string) *Election {
return &Election{client: client, keyPrefix: pfx}
}
// Campaign puts a value as eligible for the election. It blocks until
// it is elected, an error occurs, or the context is cancelled.
func (e *Election) Campaign(ctx context.Context, val string) error {
s, serr := NewSession(e.client)
if serr != nil {
return serr
}
k, rev, err := NewUniqueKV(ctx, e.client, e.keyPrefix, val, v3.WithLease(s.Lease()))
if err == nil {
err = waitDeletes(ctx, e.client, e.keyPrefix, v3.WithPrefix(), v3.WithRev(rev-1))
}
if err != nil {
// clean up in case of context cancel
select {
case <-ctx.Done():
e.client.Delete(e.client.Ctx(), k)
default:
}
return err
}
e.leaderKey, e.leaderRev, e.leaderSession = k, rev, s
return nil
}
// Proclaim lets the leader announce a new value without another election.
func (e *Election) Proclaim(ctx context.Context, val string) error {
if e.leaderSession == nil {
return ErrElectionNotLeader
}
cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
txn := e.client.Txn(ctx).If(cmp)
txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
tresp, terr := txn.Commit()
if terr != nil {
return terr
}
if !tresp.Succeeded {
e.leaderKey = ""
return ErrElectionNotLeader
}
return nil
}
// Resign lets a leader start a new election.
func (e *Election) Resign() (err error) {
if e.leaderSession == nil {
return nil
}
_, err = e.client.Delete(e.client.Ctx(), e.leaderKey)
e.leaderKey = ""
e.leaderSession = nil
return err
}
// Leader returns the leader value for the current election.
func (e *Election) Leader() (string, error) {
resp, err := e.client.Get(e.client.Ctx(), e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return "", err
} else if len(resp.Kvs) == 0 {
// no leader currently elected
return "", ErrElectionNoLeader
}
return string(resp.Kvs[0].Value), nil
}
// Observe returns a channel that observes all leader proposal values as
// GetResponse values on the current leader key. The channel closes when
// the context is cancelled or the underlying watcher is otherwise disrupted.
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
retc := make(chan v3.GetResponse)
go e.observe(ctx, retc)
return retc
}
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
defer close(ch)
for {
resp, err := e.client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
if err != nil {
return
}
var kv *storagepb.KeyValue
cctx, cancel := context.WithCancel(ctx)
if len(resp.Kvs) == 0 {
// wait for first key put on prefix
opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
wch := e.client.Watch(cctx, e.keyPrefix, opts...)
for kv == nil {
wr, ok := <-wch
if !ok || wr.Err() != nil {
cancel()
return
}
// only accept PUTs; a DELETE will make observe() spin
for _, ev := range wr.Events {
if ev.Type == storagepb.PUT {
kv = ev.Kv
break
}
}
}
} else {
kv = resp.Kvs[0]
}
wch := e.client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision))
keyDeleted := false
for !keyDeleted {
wr, ok := <-wch
if !ok {
return
}
for _, ev := range wr.Events {
if ev.Type == storagepb.DELETE {
keyDeleted = true
break
}
resp.Header = &wr.Header
resp.Kvs = []*storagepb.KeyValue{ev.Kv}
select {
case ch <- *resp:
case <-cctx.Done():
return
}
}
}
cancel()
}
}
// Key returns the leader key if elected, empty string otherwise.
func (e *Election) Key() string { return e.leaderKey }

vendor/github.com/coreos/etcd/clientv3/concurrency/key.go generated vendored Normal file

@@ -0,0 +1,103 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"fmt"
"math"
"time"
v3 "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/storage/storagepb"
"golang.org/x/net/context"
)
// NewUniqueKey creates a new key from a given prefix.
func NewUniqueKey(ctx context.Context, kv v3.KV, pfx string, opts ...v3.OpOption) (string, int64, error) {
return NewUniqueKV(ctx, kv, pfx, "", opts...)
}
func NewUniqueKV(ctx context.Context, kv v3.KV, pfx, val string, opts ...v3.OpOption) (string, int64, error) {
for {
newKey := fmt.Sprintf("%s/%v", pfx, time.Now().UnixNano())
put := v3.OpPut(newKey, val, opts...)
cmp := v3.Compare(v3.ModRevision(newKey), "=", 0)
resp, err := kv.Txn(ctx).If(cmp).Then(put).Commit()
if err != nil {
return "", 0, err
}
if !resp.Succeeded {
continue
}
return newKey, resp.Header.Revision, nil
}
}
func waitUpdate(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
wresp, ok := <-client.Watch(cctx, key, opts...)
if !ok {
return ctx.Err()
}
return wresp.Err()
}
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
wch := client.Watch(cctx, key, v3.WithRev(rev))
for wr := range wch {
for _, ev := range wr.Events {
if ev.Type == storagepb.DELETE {
return nil
}
}
}
if err := ctx.Err(); err != nil {
return err
}
return fmt.Errorf("lost watcher waiting for delete")
}
// waitDeletes efficiently waits until all keys matched by Get(key, opts...) are deleted
func waitDeletes(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
getOpts := []v3.OpOption{v3.WithSort(v3.SortByCreateRevision, v3.SortAscend)}
getOpts = append(getOpts, opts...)
resp, err := client.Get(ctx, key, getOpts...)
maxRev := int64(math.MaxInt64)
getOpts = append(getOpts, v3.WithRev(0))
for err == nil {
for len(resp.Kvs) > 0 {
i := len(resp.Kvs) - 1
if resp.Kvs[i].CreateRevision <= maxRev {
break
}
resp.Kvs = resp.Kvs[:i]
}
if len(resp.Kvs) == 0 {
break
}
lastKV := resp.Kvs[len(resp.Kvs)-1]
maxRev = lastKV.CreateRevision
err = waitDelete(ctx, client, string(lastKV.Key), maxRev)
if err != nil || len(resp.Kvs) == 1 {
break
}
getOpts = append(getOpts, v3.WithLimit(int64(len(resp.Kvs)-1)))
resp, err = client.Get(ctx, key, getOpts...)
}
return err
}

vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go generated vendored Normal file

@@ -0,0 +1,88 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"sync"
v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
// Mutex implements the sync Locker interface with etcd
type Mutex struct {
client *v3.Client
pfx string
myKey string
myRev int64
}
func NewMutex(client *v3.Client, pfx string) *Mutex {
return &Mutex{client, pfx, "", -1}
}
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
s, err := NewSession(m.client)
if err != nil {
return err
}
// put self in lock waiters via myKey; oldest waiter holds lock
m.myKey, m.myRev, err = NewUniqueKey(ctx, m.client, m.pfx, v3.WithLease(s.Lease()))
// wait for deletion revisions prior to myKey
err = waitDeletes(ctx, m.client, m.pfx, v3.WithPrefix(), v3.WithRev(m.myRev-1))
// release lock key if cancelled
select {
case <-ctx.Done():
m.Unlock()
default:
}
return err
}
func (m *Mutex) Unlock() error {
if _, err := m.client.Delete(m.client.Ctx(), m.myKey); err != nil {
return err
}
m.myKey = "\x00"
m.myRev = -1
return nil
}
func (m *Mutex) IsOwner() v3.Cmp {
return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
}
func (m *Mutex) Key() string { return m.myKey }
type lockerMutex struct{ *Mutex }
func (lm *lockerMutex) Lock() {
if err := lm.Mutex.Lock(lm.client.Ctx()); err != nil {
panic(err)
}
}
func (lm *lockerMutex) Unlock() {
if err := lm.Mutex.Unlock(); err != nil {
panic(err)
}
}
// NewLocker creates a sync.Locker backed by an etcd mutex.
func NewLocker(client *v3.Client, pfx string) sync.Locker {
return &lockerMutex{NewMutex(client, pfx)}
}

vendor/github.com/coreos/etcd/clientv3/concurrency/session.go generated vendored Normal file

@@ -0,0 +1,104 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
"sync"
v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
// only keep one ephemeral lease per client
var clientSessions clientSessionMgr = clientSessionMgr{sessions: make(map[*v3.Client]*Session)}
const sessionTTL = 60
type clientSessionMgr struct {
sessions map[*v3.Client]*Session
mu sync.Mutex
}
// Session represents a lease kept alive for the lifetime of a client.
// Fault-tolerant applications may use sessions to reason about liveness.
type Session struct {
client *v3.Client
id v3.LeaseID
cancel context.CancelFunc
donec <-chan struct{}
}
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client) (*Session, error) {
clientSessions.mu.Lock()
defer clientSessions.mu.Unlock()
if s, ok := clientSessions.sessions[client]; ok {
return s, nil
}
resp, err := client.Grant(client.Ctx(), sessionTTL)
if err != nil {
return nil, err
}
id := v3.LeaseID(resp.ID)
ctx, cancel := context.WithCancel(client.Ctx())
keepAlive, err := client.KeepAlive(ctx, id)
if err != nil || keepAlive == nil {
return nil, err
}
donec := make(chan struct{})
s := &Session{client: client, id: id, cancel: cancel, donec: donec}
clientSessions.sessions[client] = s
// keep the lease alive until client error or cancelled context
go func() {
defer func() {
clientSessions.mu.Lock()
delete(clientSessions.sessions, client)
clientSessions.mu.Unlock()
close(donec)
}()
for range keepAlive {
// eat messages until keep alive channel closes
}
}()
return s, nil
}
// Lease is the lease ID for keys bound to the session.
func (s *Session) Lease() v3.LeaseID { return s.id }
// Done returns a channel that closes when the lease is orphaned, expires, or
// is otherwise no longer being refreshed.
func (s *Session) Done() <-chan struct{} { return s.donec }
// Orphan ends the refresh for the session lease. This is useful
// in case the state of the client connection is indeterminate (revoke
// would fail) or when transferring lease ownership.
func (s *Session) Orphan() {
s.cancel()
<-s.donec
}
// Close orphans the session and revokes the session lease.
func (s *Session) Close() error {
s.Orphan()
_, err := s.client.Revoke(s.client.Ctx(), s.id)
return err
}
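Sessions are used internally by Mutex and Election above; direct use might look roughly like the following (illustrative only, same `cli` and imports as the earlier sketches):
```go
s, err := concurrency.NewSession(cli)
if err != nil {
	// handle error
}
defer s.Close()
// attach a key to the session's lease so it disappears when the session ends
_, err = cli.Put(context.TODO(), "liveness/worker-1", "up", v3.WithLease(s.Lease()))
```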

vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go generated vendored Normal file

@@ -0,0 +1,246 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrency
import (
v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
// STM is an interface for software transactional memory.
type STM interface {
// Get returns the value for a key and inserts the key in the txn's read set.
// If Get fails, it aborts the transaction with an error, never returning.
Get(key string) string
// Put adds a value for a key to the write set.
Put(key, val string, opts ...v3.OpOption)
// Rev returns the revision of a key in the read set.
Rev(key string) int64
// Del deletes a key.
Del(key string)
// commit attempts to apply the txn's changes to the server.
commit() *v3.TxnResponse
reset()
}
// stmError safely passes STM errors through panic to the STM error channel.
type stmError struct{ err error }
// NewSTMRepeatable initiates a new repeatable-read transaction; reads within
// the same transaction attempt always return the same data.
func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
s := &stm{client: c, ctx: ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
return runSTM(s, apply)
}
// NewSTMSerializable initiates a new serialized transaction; reads within the
// same transaction attempt return data from the revision of the first read.
func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
s := &stmSerializable{
stm: stm{client: c, ctx: ctx},
prefetch: make(map[string]*v3.GetResponse),
}
return runSTM(s, apply)
}
type stmResponse struct {
resp *v3.TxnResponse
err error
}
func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
outc := make(chan stmResponse, 1)
go func() {
defer func() {
if r := recover(); r != nil {
e, ok := r.(stmError)
if !ok {
// client apply panicked
panic(r)
}
outc <- stmResponse{nil, e.err}
}
}()
var out stmResponse
for {
s.reset()
if out.err = apply(s); out.err != nil {
break
}
if out.resp = s.commit(); out.resp != nil {
break
}
}
outc <- out
}()
r := <-outc
return r.resp, r.err
}
// stm implements repeatable-read software transactional memory over etcd
type stm struct {
client *v3.Client
ctx context.Context
// rset holds read key values and revisions
rset map[string]*v3.GetResponse
// wset holds overwritten keys and their values
wset map[string]stmPut
// getOpts are the opts used for gets
getOpts []v3.OpOption
}
type stmPut struct {
val string
op v3.Op
}
func (s *stm) Get(key string) string {
if wv, ok := s.wset[key]; ok {
return wv.val
}
return respToValue(s.fetch(key))
}
func (s *stm) Put(key, val string, opts ...v3.OpOption) {
s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
}
func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
func (s *stm) Rev(key string) int64 {
if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
return resp.Kvs[0].ModRevision
}
return 0
}
func (s *stm) commit() *v3.TxnResponse {
txnresp, err := s.client.Txn(s.ctx).If(s.cmps()...).Then(s.puts()...).Commit()
if err != nil {
panic(stmError{err})
}
if txnresp.Succeeded {
return txnresp
}
return nil
}
// cmps guards the txn from updates to read set
func (s *stm) cmps() (cmps []v3.Cmp) {
for k, rk := range s.rset {
cmps = append(cmps, isKeyCurrent(k, rk))
}
return
}
func (s *stm) fetch(key string) *v3.GetResponse {
if resp, ok := s.rset[key]; ok {
return resp
}
resp, err := s.client.Get(s.ctx, key, s.getOpts...)
if err != nil {
panic(stmError{err})
}
s.rset[key] = resp
return resp
}
// puts is the list of ops for all pending writes
func (s *stm) puts() (puts []v3.Op) {
for _, v := range s.wset {
puts = append(puts, v.op)
}
return
}
func (s *stm) reset() {
s.rset = make(map[string]*v3.GetResponse)
s.wset = make(map[string]stmPut)
}
type stmSerializable struct {
stm
prefetch map[string]*v3.GetResponse
}
func (s *stmSerializable) Get(key string) string {
if wv, ok := s.wset[key]; ok {
return wv.val
}
firstRead := len(s.rset) == 0
if resp, ok := s.prefetch[key]; ok {
delete(s.prefetch, key)
s.rset[key] = resp
}
resp := s.stm.fetch(key)
if firstRead {
// txn's base revision is defined by the first read
s.getOpts = []v3.OpOption{
v3.WithRev(resp.Header.Revision),
v3.WithSerializable(),
}
}
return respToValue(resp)
}
func (s *stmSerializable) Rev(key string) int64 {
s.Get(key)
return s.stm.Rev(key)
}
func (s *stmSerializable) gets() (keys []string, ops []v3.Op) {
for k := range s.rset {
keys = append(keys, k)
ops = append(ops, v3.OpGet(k))
}
return
}
func (s *stmSerializable) commit() *v3.TxnResponse {
keys, getops := s.gets()
txn := s.client.Txn(s.ctx).If(s.cmps()...).Then(s.puts()...)
// use Else to prefetch keys in case of conflict to save a round trip
txnresp, err := txn.Else(getops...).Commit()
if err != nil {
panic(stmError{err})
}
if txnresp.Succeeded {
return txnresp
}
// load prefetch with Else data
for i := range keys {
resp := txnresp.Responses[i].GetResponseRange()
s.rset[keys[i]] = (*v3.GetResponse)(resp)
}
s.prefetch = s.rset
s.getOpts = nil
return nil
}
func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
rev := r.Header.Revision + 1
if len(r.Kvs) != 0 {
rev = r.Kvs[0].ModRevision + 1
}
return v3.Compare(v3.ModRevision(k), "<", rev)
}
func respToValue(resp *v3.GetResponse) string {
if len(resp.Kvs) == 0 {
return ""
}
return string(resp.Kvs[0].Value)
}
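An illustrative use of the repeatable-read STM above, moving a value from one key to another (an editor sketch; `transfer` is a hypothetical name, and `cli` is the client from the earlier sketches):
```go
func transfer(cli *v3.Client, from, to string) error {
	apply := func(s concurrency.STM) error {
		// Get records the read in the txn's read set; Put/Del buffer writes
		v := s.Get(from)
		s.Put(to, v)
		s.Del(from)
		return nil
	}
	_, err := concurrency.NewSTMRepeatable(context.TODO(), cli, apply)
	return err
}
```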

vendor/github.com/coreos/etcd/clientv3/config.go generated vendored Normal file

@@ -0,0 +1,111 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"time"
"github.com/coreos/etcd/pkg/tlsutil"
"github.com/ghodss/yaml"
"google.golang.org/grpc"
)
// EndpointDialer is a policy for choosing which endpoint to dial next
type EndpointDialer func(*Client) (*grpc.ClientConn, error)
type Config struct {
// Endpoints is a list of URLs
Endpoints []string
// RetryDialer chooses the next endpoint to use
RetryDialer EndpointDialer
// DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration
// TLS holds the client secure credentials, if any.
TLS *tls.Config
// Logger is the logger used by client library.
Logger Logger
}
type YamlConfig struct {
Endpoints []string `json:"endpoints"`
DialTimeout time.Duration `json:"dial-timeout"`
InsecureTransport bool `json:"insecure-transport"`
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
Certfile string `json:"cert-file"`
Keyfile string `json:"key-file"`
CAfile string `json:"ca-file"`
}
func configFromFile(fpath string) (*Config, error) {
b, err := ioutil.ReadFile(fpath)
if err != nil {
return nil, err
}
yc := &YamlConfig{}
err = yaml.Unmarshal(b, yc)
if err != nil {
return nil, err
}
cfg := &Config{
Endpoints: yc.Endpoints,
DialTimeout: yc.DialTimeout,
}
if yc.InsecureTransport {
cfg.TLS = nil
return cfg, nil
}
var (
cert *tls.Certificate
cp *x509.CertPool
)
if yc.Certfile != "" && yc.Keyfile != "" {
cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
if err != nil {
return nil, err
}
}
if yc.CAfile != "" {
cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
if err != nil {
return nil, err
}
}
tlscfg := &tls.Config{
MinVersion: tls.VersionTLS10,
InsecureSkipVerify: yc.InsecureSkipTLSVerify,
RootCAs: cp,
}
if cert != nil {
tlscfg.Certificates = []tls.Certificate{*cert}
}
cfg.TLS = tlscfg
return cfg, nil
}

vendor/github.com/coreos/etcd/clientv3/doc.go generated vendored Normal file

@@ -0,0 +1,64 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package clientv3 implements the official Go etcd client for v3.
//
// Create a client using `clientv3.New`:
//
// cli, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
// DialTimeout: 5 * time.Second,
// })
// if err != nil {
// // handle error!
// }
// defer cli.Close()
//
// Make sure to close the client after using it. If the client is not closed, the
// connection will leak goroutines.
//
// To specify client request timeout, pass context.WithTimeout to APIs:
//
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
// cancel()
// if err != nil {
// // handle error!
// }
// // use the response
//
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
// Clients are safe for concurrent use by multiple goroutines.
//
// The etcd client returns two types of errors:
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go.
//
// Here is the example code to handle client errors:
//
// resp, err := kvc.Put(ctx, "", "")
// if err != nil {
// if err == context.Canceled {
// // ctx is canceled by another routine
// } else if err == context.DeadlineExceeded {
// // ctx is attached with a deadline and it exceeded
// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
// // process (verr.Errors)
// } else {
// // bad cluster endpoints, which are not etcd servers
// }
// }
//
package clientv3

vendor/github.com/coreos/etcd/clientv3/integration/doc.go generated vendored Normal file

@@ -0,0 +1,17 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package integration implements tests built upon embedded etcd, and focuses on
// correctness of etcd client.
package integration

vendor/github.com/coreos/etcd/clientv3/kv.go generated vendored Normal file

@@ -0,0 +1,207 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
PutResponse pb.PutResponse
GetResponse pb.RangeResponse
DeleteResponse pb.DeleteRangeResponse
TxnResponse pb.TxnResponse
)
type KV interface {
// Put puts a key-value pair into etcd.
// Note that key and value can be plain byte arrays, and string is
// an immutable representation of that byte array.
// To get a string of bytes, do string([]byte{0x10, 0x20}).
Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
// Get retrieves keys.
// By default, Get will return the value for "key", if any.
// When passed WithRange(end), Get will return the keys in the range [key, end).
// When passed WithFromKey(), Get returns keys greater than or equal to key.
// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
// if the required revision is compacted, the request will fail with ErrCompacted.
// When passed WithLimit(limit), the number of returned keys is bounded by limit.
// When passed WithSort(), the keys will be sorted.
Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
// Delete deletes a key, or optionally using WithRange(end), [key, end).
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
// Compact compacts etcd KV history before the given rev.
Compact(ctx context.Context, rev int64) error
// Do applies a single Op on KV without a transaction.
// Do is useful when creating arbitrary operations to be issued at a
// later time; the user can range over the operations, calling Do to
// execute them. Get/Put/Delete, on the other hand, are best suited
// for when the operation should be issued at the time of declaration.
Do(ctx context.Context, op Op) (OpResponse, error)
// Txn creates a transaction.
Txn(ctx context.Context) Txn
}
type OpResponse struct {
put *PutResponse
get *GetResponse
del *DeleteResponse
}
type kv struct {
c *Client
mu sync.Mutex // guards all fields
conn *grpc.ClientConn // conn in-use
remote pb.KVClient
}
func NewKV(c *Client) KV {
conn := c.ActiveConnection()
remote := pb.NewKVClient(conn)
return &kv{
conn: c.ActiveConnection(),
remote: remote,
c: c,
}
}
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
r, err := kv.Do(ctx, OpPut(key, val, opts...))
return r.put, err
}
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
r, err := kv.Do(ctx, OpGet(key, opts...))
return r.get, err
}
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
r, err := kv.Do(ctx, OpDelete(key, opts...))
return r.del, err
}
func (kv *kv) Compact(ctx context.Context, rev int64) error {
r := &pb.CompactionRequest{Revision: rev}
_, err := kv.getRemote().Compact(ctx, r)
if err == nil {
return nil
}
if isHalted(ctx, err) {
return err
}
go kv.switchRemote(err)
return err
}
func (kv *kv) Txn(ctx context.Context) Txn {
return &txn{
kv: kv,
ctx: ctx,
}
}
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
for {
var err error
switch op.t {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev, Serializable: op.serializable}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
resp, err = kv.getRemote().Range(ctx, r)
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
resp, err = kv.getRemote().Put(ctx, r)
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
}
case tDeleteRange:
var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
resp, err = kv.getRemote().DeleteRange(ctx, r)
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
default:
panic("Unknown op")
}
if isHalted(ctx, err) {
return OpResponse{}, err
}
// do not retry on modifications
if op.isWrite() {
go kv.switchRemote(err)
return OpResponse{}, err
}
if nerr := kv.switchRemote(err); nerr != nil {
return OpResponse{}, nerr
}
}
}
func (kv *kv) switchRemote(prevErr error) error {
// Usually it's a bad idea to lock on network i/o but here it's OK
// since the link is down and new requests can't be processed anyway.
// Likewise, if connecting stalls, closing the Client can break the
// lock via context cancelation.
kv.mu.Lock()
defer kv.mu.Unlock()
newConn, err := kv.c.retryConnection(kv.conn, prevErr)
if err != nil {
return err
}
kv.conn = newConn
kv.remote = pb.NewKVClient(kv.conn)
return nil
}
func (kv *kv) getRemote() pb.KVClient {
kv.mu.Lock()
defer kv.mu.Unlock()
return kv.remote
}
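The Txn returned by kv.Txn(ctx) follows the If/Then/Else/Commit pattern used elsewhere in this commit (see election.go and stm.go above). A small compare-and-swap sketch, illustrative only and reusing the `kvc` handle from the README:
```go
cmp := clientv3.Compare(clientv3.Value("sample_key"), "=", "old")
put := clientv3.OpPut("sample_key", "new")
tresp, err := kvc.Txn(context.TODO()).If(cmp).Then(put).Commit()
if err != nil {
	// handle error
}
if !tresp.Succeeded {
	// the comparison failed, so the Then ops were not applied
}
```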

vendor/github.com/coreos/etcd/clientv3/lease.go generated vendored Normal file

@@ -0,0 +1,433 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
LeaseGrantResponse pb.LeaseGrantResponse
LeaseRevokeResponse pb.LeaseRevokeResponse
LeaseKeepAliveResponse pb.LeaseKeepAliveResponse
LeaseID int64
)
const (
// a small buffer to store unsent lease responses.
leaseResponseChSize = 16
// NoLease is a lease ID for the absence of a lease.
NoLease LeaseID = 0
)
type Lease interface {
// Grant creates a new lease.
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
// Revoke revokes the given lease.
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
// KeepAlive keeps the given lease alive forever.
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
// KeepAliveOnce renews the lease once. In most cases, KeepAlive
// should be used instead of KeepAliveOnce.
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
// Close releases all resources Lease keeps for efficient communication
// with the etcd server.
Close() error
}
type lessor struct {
c *Client
mu sync.Mutex // guards all fields
conn *grpc.ClientConn // conn in-use
// donec is closed when recvKeepAliveLoop stops
donec chan struct{}
remote pb.LeaseClient
stream pb.Lease_LeaseKeepAliveClient
streamCancel context.CancelFunc
stopCtx context.Context
stopCancel context.CancelFunc
keepAlives map[LeaseID]*keepAlive
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
chs []chan<- *LeaseKeepAliveResponse
ctxs []context.Context
// deadline is the next time to send a keep alive message
deadline time.Time
// donec is closed on lease revoke, expiration, or cancel.
donec chan struct{}
}
func NewLease(c *Client) Lease {
l := &lessor{
c: c,
conn: c.ActiveConnection(),
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
}
l.remote = pb.NewLeaseClient(l.conn)
l.stopCtx, l.stopCancel = context.WithCancel(context.Background())
go l.recvKeepAliveLoop()
return l
}
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.getRemote().LeaseGrant(cctx, r)
if err == nil {
return (*LeaseGrantResponse)(resp), nil
}
if isHalted(cctx, err) {
return nil, err
}
if nerr := l.switchRemoteAndStream(err); nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.getRemote().LeaseRevoke(cctx, r)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
if nerr := l.switchRemoteAndStream(err); nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
l.mu.Lock()
ka, ok := l.keepAlives[id]
if !ok {
// create fresh keep alive
ka = &keepAlive{
chs: []chan<- *LeaseKeepAliveResponse{ch},
ctxs: []context.Context{ctx},
deadline: time.Now(),
donec: make(chan struct{}),
}
l.keepAlives[id] = ka
} else {
// add channel and context to existing keep alive
ka.ctxs = append(ka.ctxs, ctx)
ka.chs = append(ka.chs, ch)
}
l.mu.Unlock()
go l.keepAliveCtxCloser(id, ctx, ka.donec)
return ch, nil
}
func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
resp, err := l.keepAliveOnce(cctx, id)
if err == nil {
return resp, err
}
nerr := l.switchRemoteAndStream(err)
if nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) Close() error {
l.stopCancel()
<-l.donec
return nil
}
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
select {
case <-donec:
return
case <-l.donec:
return
case <-ctx.Done():
}
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[id]
if !ok {
return
}
// close channel and remove context if still associated with keep alive
for i, c := range ka.ctxs {
if c == ctx {
close(ka.chs[i])
ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
break
}
}
// remove the keepalive if there are no more listeners
if len(ka.chs) == 0 {
delete(l.keepAlives, id)
}
}
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
stream, err := l.getRemote().LeaseKeepAlive(ctx)
if err != nil {
return nil, err
}
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
if err != nil {
return nil, err
}
resp, rerr := stream.Recv()
if rerr != nil {
return nil, rerr
}
return (*LeaseKeepAliveResponse)(resp), nil
}
func (l *lessor) recvKeepAliveLoop() {
defer func() {
l.stopCancel()
l.mu.Lock()
close(l.donec)
for _, ka := range l.keepAlives {
ka.Close()
}
l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock()
}()
stream, serr := l.resetRecv()
for serr == nil {
resp, err := stream.Recv()
if err != nil {
if isHalted(l.stopCtx, err) {
return
}
stream, serr = l.resetRecv()
continue
}
l.recvKeepAlive(resp)
}
}
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
if err := l.switchRemoteAndStream(nil); err != nil {
return nil, err
}
stream := l.getKeepAliveStream()
go l.sendKeepAliveLoop(stream)
return stream, nil
}
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
id := LeaseID(resp.ID)
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[id]
if !ok {
return
}
if resp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, id)
ka.Close()
return
}
// send update to all channels
nextDeadline := time.Now().Add(1 + time.Duration(resp.TTL/3)*time.Second)
for _, ch := range ka.chs {
select {
case ch <- (*LeaseKeepAliveResponse)(resp):
ka.deadline = nextDeadline
default:
}
}
}
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for {
select {
case <-time.After(500 * time.Millisecond):
case <-l.donec:
return
case <-l.stopCtx.Done():
return
}
tosend := make([]LeaseID, 0)
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
tosend = append(tosend, id)
}
}
l.mu.Unlock()
for _, id := range tosend {
r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
if err := stream.Send(r); err != nil {
// TODO do something with this error?
return
}
}
}
}
func (l *lessor) getRemote() pb.LeaseClient {
l.mu.Lock()
defer l.mu.Unlock()
return l.remote
}
func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
l.mu.Lock()
defer l.mu.Unlock()
return l.stream
}
func (l *lessor) switchRemoteAndStream(prevErr error) error {
l.mu.Lock()
conn := l.conn
l.mu.Unlock()
var (
err error
newConn *grpc.ClientConn
)
if prevErr != nil {
conn.Close()
newConn, err = l.c.retryConnection(conn, prevErr)
if err != nil {
return err
}
}
l.mu.Lock()
if newConn != nil {
l.conn = newConn
}
l.remote = pb.NewLeaseClient(l.conn)
l.mu.Unlock()
serr := l.newStream()
if serr != nil {
return serr
}
return nil
}
func (l *lessor) newStream() error {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.getRemote().LeaseKeepAlive(sctx)
if err != nil {
cancel()
return err
}
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.stream.CloseSend()
l.streamCancel()
}
l.streamCancel = cancel
l.stream = stream
return nil
}
func (ka *keepAlive) Close() {
close(ka.donec)
for _, ch := range ka.chs {
close(ch)
}
}
// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done
// should be closed when the work is finished. When done is closed, cancelWhenStop
// releases its internal resources.
func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} {
done := make(chan struct{}, 1)
go func() {
select {
case <-stopc:
case <-done:
}
cancel()
}()
return done
}
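// Illustrative caller pattern (a sketch, not part of this file): an RPC method can
// derive a cancelable context, register it with cancelWhenStop so the call is
// canceled when the lessor stops, and close the returned done channel on exit to
// release the helper goroutine:
//
//	cctx, cancel := context.WithCancel(ctx)
//	done := cancelWhenStop(cancel, l.stopCtx.Done())
//	defer close(done)
//	// ... issue the lease RPC with cctx ...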

64
vendor/github.com/coreos/etcd/clientv3/logger.go generated vendored Normal file
View File

@@ -0,0 +1,64 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"log"
"os"
"sync"
"google.golang.org/grpc/grpclog"
)
type Logger grpclog.Logger
var (
logger settableLogger
)
type settableLogger struct {
l grpclog.Logger
mu sync.RWMutex
}
func init() {
// use go's standard logger by default like grpc
logger.mu.Lock()
logger.l = log.New(os.Stderr, "", log.LstdFlags)
grpclog.SetLogger(&logger)
logger.mu.Unlock()
}
func (s *settableLogger) Set(l Logger) {
s.mu.Lock()
s.l = l
s.mu.Unlock()
}
func (s *settableLogger) Get() Logger {
s.mu.RLock()
l := s.l
s.mu.RUnlock()
return l
}
// implement the grpclog.Logger interface
func (s *settableLogger) Fatal(args ...interface{}) { s.Get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.Get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.Get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.Get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.Get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.Get().Println(args...) }
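// Illustrative note (a sketch, not part of the original file): because the
// package-level logger is registered with grpclog in init(), code in this package
// can swap the backend via Set; *log.Logger satisfies the grpclog.Logger interface:
//
//	logger.Set(log.New(os.Stderr, "etcd-client: ", log.LstdFlags))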

164
vendor/github.com/coreos/etcd/clientv3/maintenance.go generated vendored Normal file
View File

@@ -0,0 +1,164 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
DefragmentResponse pb.DefragmentResponse
AlarmResponse pb.AlarmResponse
AlarmMember pb.AlarmMember
StatusResponse pb.StatusResponse
)
type Maintenance interface {
// AlarmList gets all active alarms.
AlarmList(ctx context.Context) (*AlarmResponse, error)
// AlarmDisarm disarms a given alarm.
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
// Defragment defragments the storage backend of the etcd member at the given endpoint.
// Defragment is only needed after deleting a large number of keys, in order to reclaim
// the freed resources.
// Defragment is an expensive operation. Users should avoid defragmenting multiple members
// at the same time.
// To defragment multiple members in the cluster, call Defragment multiple
// times with different endpoints.
Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
// Status gets the status of the member.
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
}
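// Illustrative usage (a sketch; assumes an existing *Client named cli and a list of
// member endpoints): defragment members one endpoint at a time, as recommended above:
//
//	m := NewMaintenance(cli)
//	for _, ep := range endpoints { // endpoints: assumed []string of member endpoints
//		if _, err := m.Defragment(context.TODO(), ep); err != nil {
//			// handle error; avoid defragmenting multiple members at once
//			break
//		}
//	}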
type maintenance struct {
c *Client
mu sync.Mutex
conn *grpc.ClientConn // conn in-use
remote pb.MaintenanceClient
}
func NewMaintenance(c *Client) Maintenance {
conn := c.ActiveConnection()
return &maintenance{
c: c,
conn: conn,
remote: pb.NewMaintenanceClient(conn),
}
}
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
req := &pb.AlarmRequest{
Action: pb.AlarmRequest_GET,
MemberID: 0, // all
Alarm: pb.AlarmType_NONE, // all
}
for {
resp, err := m.getRemote().Alarm(ctx, req)
if err == nil {
return (*AlarmResponse)(resp), nil
}
if isHalted(ctx, err) {
return nil, err
}
if err = m.switchRemote(err); err != nil {
return nil, err
}
}
}
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
req := &pb.AlarmRequest{
Action: pb.AlarmRequest_DEACTIVATE,
MemberID: am.MemberID,
Alarm: am.Alarm,
}
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
ar, err := m.AlarmList(ctx)
if err != nil {
return nil, err
}
ret := AlarmResponse{}
for _, am := range ar.Alarms {
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
if derr != nil {
return nil, derr
}
ret.Alarms = append(ret.Alarms, dresp.Alarms...)
}
return &ret, nil
}
resp, err := m.getRemote().Alarm(ctx, req)
if err == nil {
return (*AlarmResponse)(resp), nil
}
if !isHalted(ctx, err) {
go m.switchRemote(err)
}
return nil, err
}
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
conn, err := m.c.Dial(endpoint)
if err != nil {
return nil, err
}
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{})
if err != nil {
return nil, err
}
return (*DefragmentResponse)(resp), nil
}
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
conn, err := m.c.Dial(endpoint)
if err != nil {
return nil, err
}
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Status(ctx, &pb.StatusRequest{})
if err != nil {
return nil, err
}
return (*StatusResponse)(resp), nil
}
func (m *maintenance) getRemote() pb.MaintenanceClient {
m.mu.Lock()
defer m.mu.Unlock()
return m.remote
}
func (m *maintenance) switchRemote(prevErr error) error {
m.mu.Lock()
defer m.mu.Unlock()
newConn, err := m.c.retryConnection(m.conn, prevErr)
if err != nil {
return err
}
m.conn = newConn
m.remote = pb.NewMaintenanceClient(m.conn)
return nil
}

110
vendor/github.com/coreos/etcd/clientv3/mirror/syncer.go generated vendored Normal file
View File

@@ -0,0 +1,110 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package mirror implements etcd mirroring operations.
package mirror
import (
"github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
)
const (
batchLimit = 1000
)
// Syncer syncs with the key-value state of an etcd cluster.
type Syncer interface {
// SyncBase syncs the base state of the key-value space.
// The key-value state is sent through the returned chan.
SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error)
// SyncUpdates syncs the updates of the key-value state.
// The update events are sent through the returned chan.
SyncUpdates(ctx context.Context) clientv3.WatchChan
}
// NewSyncer creates a Syncer.
func NewSyncer(c *clientv3.Client, prefix string, rev int64) Syncer {
return &syncer{c: c, prefix: prefix, rev: rev}
}
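// Illustrative usage (a sketch; assumes an existing *clientv3.Client named cli):
// mirror a prefix by draining SyncBase, then following SyncUpdates:
//
//	s := NewSyncer(cli, "prefix/", 0)
//	rc, errc := s.SyncBase(context.TODO())
//	for resp := range rc {
//		for _, kv := range resp.Kvs {
//			// apply kv to the mirror
//		}
//	}
//	if err := <-errc; err != nil {
//		// handle error
//	}
//	for wresp := range s.SyncUpdates(context.TODO()) {
//		// apply wresp.Events to the mirror
//	}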
type syncer struct {
c *clientv3.Client
rev int64
prefix string
}
func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) {
respchan := make(chan clientv3.GetResponse, 1024)
errchan := make(chan error, 1)
// if rev is not specified, we will choose the most recent revision.
if s.rev == 0 {
resp, err := s.c.Get(ctx, "foo")
if err != nil {
errchan <- err
close(respchan)
close(errchan)
return respchan, errchan
}
s.rev = resp.Header.Revision
}
go func() {
defer close(respchan)
defer close(errchan)
var key string
opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)}
if len(s.prefix) == 0 {
// If len(s.prefix) == 0, we will sync the entire key-value space.
// We then range from the smallest key (0x00) to the end.
opts = append(opts, clientv3.WithFromKey())
key = "\x00"
} else {
// If len(s.prefix) != 0, we will sync the key-value space with the given prefix.
// We then range from the prefix to the next prefix if it exists, or
// from the prefix to the end if the next prefix does not exist.
opts = append(opts, clientv3.WithPrefix())
key = s.prefix
}
for {
resp, err := s.c.Get(ctx, key, opts...)
if err != nil {
errchan <- err
return
}
respchan <- (clientv3.GetResponse)(*resp)
if !resp.More {
return
}
// move to next key
key = string(append(resp.Kvs[len(resp.Kvs)-1].Key, 0))
}
}()
return respchan, errchan
}
func (s *syncer) SyncUpdates(ctx context.Context) clientv3.WatchChan {
if s.rev == 0 {
panic("unexpected revision = 0. Calling SyncUpdates before SyncBase finishes?")
}
return s.c.Watch(ctx, s.prefix, clientv3.WithPrefix(), clientv3.WithRev(s.rev+1))
}

240
vendor/github.com/coreos/etcd/clientv3/op.go generated vendored Normal file
View File

@@ -0,0 +1,240 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
type opType int
const (
// A default Op has opType 0, which is invalid.
tRange opType = iota + 1
tPut
tDeleteRange
)
var (
noPrefixEnd = []byte{0}
)
// Op represents an Operation that kv can execute.
type Op struct {
t opType
key []byte
end []byte
// for range
limit int64
sort *SortOption
serializable bool
// for range, watch
rev int64
// progressNotify is for progress updates.
progressNotify bool
// for put
val []byte
leaseID LeaseID
}
func (op Op) toRequestUnion() *pb.RequestUnion {
switch op.t {
case tRange:
r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev, Serializable: op.serializable}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestRange{RequestRange: r}}
case tPut:
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestPut{RequestPut: r}}
case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
return &pb.RequestUnion{Request: &pb.RequestUnion_RequestDeleteRange{RequestDeleteRange: r}}
default:
panic("Unknown Op")
}
}
func (op Op) isWrite() bool {
return op.t != tRange
}
func OpGet(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
return ret
}
func OpDelete(key string, opts ...OpOption) Op {
ret := Op{t: tDeleteRange, key: []byte(key)}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
panic("unexpected lease in delete")
case ret.limit != 0:
panic("unexpected limit in delete")
case ret.rev != 0:
panic("unexpected revision in delete")
case ret.sort != nil:
panic("unexpected sort in delete")
case ret.serializable:
panic("unexpected serializable in delete")
}
return ret
}
func OpPut(key, val string, opts ...OpOption) Op {
ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
ret.applyOpts(opts)
switch {
case ret.end != nil:
panic("unexpected range in put")
case ret.limit != 0:
panic("unexpected limit in put")
case ret.rev != 0:
panic("unexpected revision in put")
case ret.sort != nil:
panic("unexpected sort in put")
case ret.serializable:
panic("unexpected serializable in delete")
}
return ret
}
func opWatch(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
panic("unexpected lease in watch")
case ret.limit != 0:
panic("unexpected limit in watch")
case ret.sort != nil:
panic("unexpected sort in watch")
case ret.serializable:
panic("unexpected serializable in watch")
}
return ret
}
func (op *Op) applyOpts(opts []OpOption) {
for _, opt := range opts {
opt(op)
}
}
// OpOption configures Operations like Get, Put, Delete.
type OpOption func(*Op)
// WithLease attaches a lease ID to a key in 'Put' request.
func WithLease(leaseID LeaseID) OpOption {
return func(op *Op) { op.leaseID = leaseID }
}
// WithLimit limits the number of results to return from 'Get' request.
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
// WithRev specifies the store revision for a 'Get' request,
// or the start revision of a 'Watch' request.
func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
// WithSort specifies the ordering in a 'Get' request. It requires
// 'WithRange' and/or 'WithPrefix' to be specified too.
// 'target' specifies the field to sort by: key, version, create revision, mod revision, or value.
// 'order' is one of 'SortNone', 'SortAscend', or 'SortDescend'.
func WithSort(target SortTarget, order SortOrder) OpOption {
return func(op *Op) {
op.sort = &SortOption{target, order}
}
}
func getPrefix(key []byte) []byte {
end := make([]byte, len(key))
copy(end, key)
for i := len(end) - 1; i >= 0; i-- {
if end[i] < 0xff {
end[i] = end[i] + 1
end = end[:i+1]
return end
}
}
// next prefix does not exist (e.g., 0xffff);
// default to WithFromKey policy
return noPrefixEnd
}
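// Worked example (illustrative, not in the original): getPrefix([]byte("foo")) returns
// []byte("fop"), so a prefix request on "foo" ranges over ["foo", "fop"); for a key
// consisting only of 0xff bytes there is no next prefix, and noPrefixEnd is returned,
// which ranges to the end of the keyspace as WithFromKey does.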
// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
// can return 'foo1', 'foo2', and so on.
func WithPrefix() OpOption {
return func(op *Op) {
op.end = getPrefix(op.key)
}
}
// WithRange specifies the range of 'Get' or 'Delete' requests.
// For example, 'Get' requests with 'WithRange(end)' returns
// the keys in the range [key, end).
func WithRange(endKey string) OpOption {
return func(op *Op) { op.end = []byte(endKey) }
}
// WithFromKey specifies the range of 'Get' or 'Delete' requests
// to be equal to or greater than the key in the argument.
func WithFromKey() OpOption { return WithRange("\x00") }
// WithSerializable makes 'Get' request serializable. By default,
// it's linearizable. Serializable requests are better when lower latency
// is required.
func WithSerializable() OpOption {
return func(op *Op) { op.serializable = true }
}
// WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
// WithLastCreate gets the key with the latest creation revision in the request range.
func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
// WithFirstKey gets the lexically first key in the request range.
func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
// WithLastKey gets the lexically last key in the request range.
func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
// WithFirstRev gets the key with the oldest modification revision in the request range.
func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
// WithLastRev gets the key with the latest modification revision in the request range.
func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
// withTop gets the first key over the get's prefix given a sort order
func withTop(target SortTarget, order SortOrder) []OpOption {
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
}
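// Illustrative combinations (a sketch; kv with a Get(ctx, key, opts ...OpOption)
// method is assumed from the package's KV API):
//
//	// key with the latest modification revision under the "foo" prefix
//	resp, err := kv.Get(ctx, "foo", WithLastRev()...)
//	if err == nil && len(resp.Kvs) > 0 {
//		// resp.Kvs[0] is the most recently modified key under "foo"
//	}
//	// equivalent spelled out with the underlying options
//	_, _ = kv.Get(ctx, "foo", WithPrefix(), WithSort(SortByModRevision, SortDescend), WithLimit(1))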
// WithProgressNotify makes the watch server send periodic progress updates.
// Progress updates have zero events in WatchResponse.
func WithProgressNotify() OpOption {
return func(op *Op) {
op.progressNotify = true
}
}

37
vendor/github.com/coreos/etcd/clientv3/sort.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
type SortTarget int
type SortOrder int
const (
SortNone SortOrder = iota
SortAscend
SortDescend
)
const (
SortByKey SortTarget = iota
SortByVersion
SortByCreateRevision
SortByModRevision
SortByValue
)
type SortOption struct {
Target SortTarget
Order SortOrder
}

160
vendor/github.com/coreos/etcd/clientv3/txn.go generated vendored Normal file
View File

@@ -0,0 +1,160 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
)
// Txn is the interface that wraps mini-transactions.
//
// Tx.If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
type Txn interface {
// If takes a list of comparisons. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Otherwise, the operations
// passed into Else() will be executed.
If(cs ...Cmp) Txn
// Then takes a list of operations. The Ops list will be executed if the
// comparisons passed in If() succeed.
Then(ops ...Op) Txn
// Else takes a list of operations. The Ops list will be executed if the
// comparisons passed in If() fail.
Else(ops ...Op) Txn
// Commit tries to commit the transaction.
Commit() (*TxnResponse, error)
// TODO: add a Do for shortcut the txn without any condition?
}
type txn struct {
kv *kv
ctx context.Context
mu sync.Mutex
cif bool
cthen bool
celse bool
isWrite bool
cmps []*pb.Compare
sus []*pb.RequestUnion
fas []*pb.RequestUnion
}
func (txn *txn) If(cs ...Cmp) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.cif {
panic("cannot call If twice!")
}
if txn.cthen {
panic("cannot call If after Then!")
}
if txn.celse {
panic("cannot call If after Else!")
}
txn.cif = true
for i := range cs {
txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
}
return txn
}
func (txn *txn) Then(ops ...Op) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.cthen {
panic("cannot call Then twice!")
}
if txn.celse {
panic("cannot call Then after Else!")
}
txn.cthen = true
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.sus = append(txn.sus, op.toRequestUnion())
}
return txn
}
func (txn *txn) Else(ops ...Op) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.celse {
panic("cannot call Else twice!")
}
txn.celse = true
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.fas = append(txn.fas, op.toRequestUnion())
}
return txn
}
func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock()
defer txn.mu.Unlock()
kv := txn.kv
for {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
resp, err := kv.getRemote().Txn(txn.ctx, r)
if err == nil {
return (*TxnResponse)(resp), nil
}
if isHalted(txn.ctx, err) {
return nil, err
}
if txn.isWrite {
go kv.switchRemote(err)
return nil, err
}
if nerr := kv.switchRemote(err); nerr != nil {
return nil, nerr
}
}
}

572
vendor/github.com/coreos/etcd/clientv3/watch.go generated vendored Normal file
View File

@@ -0,0 +1,572 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"fmt"
"sync"
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
storagepb "github.com/coreos/etcd/storage/storagepb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
const (
EventTypeDelete = storagepb.DELETE
EventTypePut = storagepb.PUT
)
type Event storagepb.Event
type WatchChan <-chan WatchResponse
type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned
// through the returned channel.
// If the watch is slow or the required rev is compacted, the watch request
// might be canceled from the server side and the chan will be closed.
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests.
Close() error
}
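// Illustrative usage (a sketch; assumes a Watcher named w obtained from NewWatcher):
// stream events under a prefix and stop when the watch reports an error such as a
// compacted revision:
//
//	for wresp := range w.Watch(context.Background(), "foo", WithPrefix()) {
//		if err := wresp.Err(); err != nil {
//			// watch canceled by the server (e.g. compacted revision); handle and stop
//			break
//		}
//		for _, ev := range wresp.Events {
//			// ev.Type is EventTypePut or EventTypeDelete; ev.Kv holds the key-value pair
//		}
//	}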
type WatchResponse struct {
Header pb.ResponseHeader
Events []*Event
// CompactRevision is the minimum revision the watcher may receive.
CompactRevision int64
// Canceled is used to indicate watch failure.
// If the watch failed and the stream is about to close, the channel sends a final
// response with Canceled set to true and a non-nil Err() before it is closed.
Canceled bool
}
// IsCreate returns true if the event tells that the key is newly created.
func (e *Event) IsCreate() bool {
return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
}
// IsModify returns true if the event tells that a new value was put on an existing key.
func (e *Event) IsModify() bool {
return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
}
// Err is the error value if this WatchResponse holds an error.
func (wr *WatchResponse) Err() error {
if wr.CompactRevision != 0 {
return v3rpc.ErrCompacted
}
if wr.Canceled {
return v3rpc.ErrFutureRev
}
return nil
}
// IsProgressNotify returns true if the WatchResponse is a progress notification.
func (wr *WatchResponse) IsProgressNotify() bool {
return len(wr.Events) == 0 && !wr.Canceled
}
// watcher implements the Watcher interface
type watcher struct {
c *Client
conn *grpc.ClientConn
remote pb.WatchClient
// ctx controls internal remote.Watch requests
ctx context.Context
cancel context.CancelFunc
// streams holds all active watchers
streams map[int64]*watcherStream
// mu protects the streams map
mu sync.RWMutex
// reqc sends a watch request from Watch() to the main goroutine
reqc chan *watchRequest
// respc receives data from the watch client
respc chan *pb.WatchResponse
// stopc is sent to the main goroutine to stop all processing
stopc chan struct{}
// donec closes to broadcast shutdown
donec chan struct{}
// errc transmits errors from grpc Recv
errc chan error
}
// watchRequest is issued by the subscriber to start a new watcher
type watchRequest struct {
ctx context.Context
key string
end string
rev int64
// progressNotify is for progress updates.
progressNotify bool
// retc receives a chan WatchResponse once the watcher is established
retc chan chan WatchResponse
}
// watcherStream represents a registered watcher
type watcherStream struct {
initReq watchRequest
// outc publishes watch responses to subscriber
outc chan<- WatchResponse
// recvc buffers watch responses before publishing
recvc chan *WatchResponse
id int64
// lastRev is revision last successfully sent over outc
lastRev int64
// resumec indicates the stream must recover at a given revision
resumec chan int64
}
func NewWatcher(c *Client) Watcher {
ctx, cancel := context.WithCancel(context.Background())
conn := c.ActiveConnection()
w := &watcher{
c: c,
conn: conn,
remote: pb.NewWatchClient(conn),
ctx: ctx,
cancel: cancel,
streams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
reqc: make(chan *watchRequest),
stopc: make(chan struct{}),
donec: make(chan struct{}),
errc: make(chan error, 1),
}
go w.run()
return w
}
// Watch posts a watch request to run() and waits for a new watcher channel
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
ow := opWatch(key, opts...)
retc := make(chan chan WatchResponse, 1)
wr := &watchRequest{
ctx: ctx,
key: string(ow.key),
end: string(ow.end),
rev: ow.rev,
progressNotify: ow.progressNotify,
retc: retc,
}
ok := false
// submit request
select {
case w.reqc <- wr:
ok = true
case <-wr.ctx.Done():
case <-w.donec:
}
// receive channel
if ok {
select {
case ret := <-retc:
return ret
case <-ctx.Done():
case <-w.donec:
}
}
// couldn't create channel; return closed channel
ch := make(chan WatchResponse)
close(ch)
return ch
}
func (w *watcher) Close() error {
select {
case w.stopc <- struct{}{}:
case <-w.donec:
}
<-w.donec
return <-w.errc
}
func (w *watcher) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) {
if pendingReq == nil {
// no pending request; ignore
return
}
if resp.Canceled || resp.CompactRevision != 0 {
// a cancel at id creation time means the start revision has
// been compacted out of the store
ret := make(chan WatchResponse, 1)
ret <- WatchResponse{
Header: *resp.Header,
CompactRevision: resp.CompactRevision,
Canceled: true}
close(ret)
pendingReq.retc <- ret
return
}
ret := make(chan WatchResponse)
if resp.WatchId == -1 {
// failed; no channel
close(ret)
pendingReq.retc <- ret
return
}
ws := &watcherStream{
initReq: *pendingReq,
id: resp.WatchId,
outc: ret,
// buffered so unlikely to block on sending while holding mu
recvc: make(chan *WatchResponse, 4),
resumec: make(chan int64),
}
if pendingReq.rev == 0 {
// note the header revision so that a put following a current watcher
// disconnect will arrive on the watcher channel after reconnect
ws.initReq.rev = resp.Header.Revision
}
w.mu.Lock()
w.streams[ws.id] = ws
w.mu.Unlock()
// pass back the subscriber channel for the watcher
pendingReq.retc <- ret
// send messages to subscriber
go w.serveStream(ws)
}
// closeStream closes the watcher resources and removes it
func (w *watcher) closeStream(ws *watcherStream) {
// cancels request stream; subscriber receives nil channel
close(ws.initReq.retc)
// close subscriber's channel
close(ws.outc)
// shutdown serveStream
close(ws.recvc)
delete(w.streams, ws.id)
}
// run is the root of the goroutines for managing a watcher client
func (w *watcher) run() {
defer func() {
close(w.donec)
w.cancel()
}()
// start a stream with the etcd grpc server
wc, wcerr := w.newWatchClient()
if wcerr != nil {
w.errc <- wcerr
return
}
var pendingReq, failedReq *watchRequest
curReqC := w.reqc
cancelSet := make(map[int64]struct{})
for {
select {
// Watch() requested
case pendingReq = <-curReqC:
// no more watch requests until there's a response
curReqC = nil
if err := wc.Send(pendingReq.toPB()); err == nil {
// pendingReq now waits on w.respc
break
}
failedReq = pendingReq
// New events from the watch client
case pbresp := <-w.respc:
switch {
case pbresp.Created:
// response to pending req, try to add
w.addStream(pbresp, pendingReq)
pendingReq = nil
curReqC = w.reqc
case pbresp.Canceled:
delete(cancelSet, pbresp.WatchId)
default:
// dispatch to appropriate watch stream
if ok := w.dispatchEvent(pbresp); ok {
break
}
// watch response on unexpected watch id; cancel id
if _, ok := cancelSet[pbresp.WatchId]; ok {
break
}
cancelSet[pbresp.WatchId] = struct{}{}
cr := &pb.WatchRequest_CancelRequest{
CancelRequest: &pb.WatchCancelRequest{
WatchId: pbresp.WatchId,
},
}
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
// watch client failed to recv; spawn another if possible
// TODO report watch client errors from errc?
case <-w.errc:
if wc, wcerr = w.newWatchClient(); wcerr != nil {
w.errc <- wcerr
return
}
curReqC = w.reqc
if pendingReq != nil {
failedReq = pendingReq
}
cancelSet = make(map[int64]struct{})
case <-w.stopc:
w.errc <- nil
return
}
// send failed; queue for retry
if failedReq != nil {
go func(wr *watchRequest) {
select {
case w.reqc <- wr:
case <-wr.ctx.Done():
case <-w.donec:
}
}(pendingReq)
failedReq = nil
pendingReq = nil
}
}
}
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watcher) dispatchEvent(pbresp *pb.WatchResponse) bool {
w.mu.RLock()
defer w.mu.RUnlock()
ws, ok := w.streams[pbresp.WatchId]
events := make([]*Event, len(pbresp.Events))
for i, ev := range pbresp.Events {
events[i] = (*Event)(ev)
}
if ok {
wr := &WatchResponse{
Header: *pbresp.Header,
Events: events,
CompactRevision: pbresp.CompactRevision,
Canceled: pbresp.Canceled}
ws.recvc <- wr
}
return ok
}
// serveWatchClient forwards messages from the grpc stream to run()
func (w *watcher) serveWatchClient(wc pb.Watch_WatchClient) {
for {
resp, err := wc.Recv()
if err != nil {
select {
case w.errc <- err:
case <-w.donec:
}
return
}
select {
case w.respc <- resp:
case <-w.donec:
return
}
}
}
// serveStream forwards watch responses from run() to the subscriber
func (w *watcher) serveStream(ws *watcherStream) {
emptyWr := &WatchResponse{}
wrs := []*WatchResponse{}
resuming := false
closing := false
for !closing {
curWr := emptyWr
outc := ws.outc
if len(wrs) > 0 {
curWr = wrs[0]
} else {
outc = nil
}
select {
case outc <- *curWr:
if wrs[0].Err() != nil {
closing = true
break
}
var newRev int64
if len(wrs[0].Events) > 0 {
newRev = wrs[0].Events[len(wrs[0].Events)-1].Kv.ModRevision
} else {
newRev = wrs[0].Header.Revision
}
if newRev != ws.lastRev {
ws.lastRev = newRev
}
wrs[0] = nil
wrs = wrs[1:]
case wr, ok := <-ws.recvc:
if !ok {
// shutdown from closeStream
return
}
// resume up to last seen event if disconnected
if resuming {
resuming = false
// trim events already seen
for i := 0; i < len(wr.Events); i++ {
if wr.Events[i].Kv.ModRevision > ws.lastRev {
wr.Events = wr.Events[i:]
break
}
}
// only forward new events
if wr.Events[0].Kv.ModRevision == ws.lastRev {
break
}
}
// TODO don't keep buffering if subscriber stops reading
wrs = append(wrs, wr)
case resumeRev := <-ws.resumec:
wrs = nil
resuming = true
if resumeRev == -1 {
// pause serving stream while resume gets set up
break
}
if resumeRev != ws.lastRev {
panic("unexpected resume revision")
}
case <-w.donec:
closing = true
case <-ws.initReq.ctx.Done():
closing = true
}
}
w.mu.Lock()
w.closeStream(ws)
w.mu.Unlock()
// lazily send cancel message if events on missing id
}
func (w *watcher) newWatchClient() (pb.Watch_WatchClient, error) {
ws, rerr := w.resume()
if rerr != nil {
return nil, rerr
}
go w.serveWatchClient(ws)
return ws, nil
}
// resume creates a new WatchClient with all current watchers reestablished
func (w *watcher) resume() (ws pb.Watch_WatchClient, err error) {
for {
if ws, err = w.openWatchClient(); err != nil {
break
} else if err = w.resumeWatchers(ws); err == nil {
break
}
}
return ws, err
}
// openWatchClient retries opening a watch client until retryConnection fails
func (w *watcher) openWatchClient() (ws pb.Watch_WatchClient, err error) {
for {
if ws, err = w.remote.Watch(w.ctx); ws != nil {
break
} else if isHalted(w.ctx, err) {
return nil, err
}
newConn, nerr := w.c.retryConnection(w.conn, nil)
if nerr != nil {
return nil, nerr
}
w.conn = newConn
w.remote = pb.NewWatchClient(w.conn)
}
return ws, nil
}
// resumeWatchers rebuilds every registered watcher on a new client
func (w *watcher) resumeWatchers(wc pb.Watch_WatchClient) error {
streams := []*watcherStream{}
w.mu.RLock()
for _, ws := range w.streams {
streams = append(streams, ws)
}
w.mu.RUnlock()
for _, ws := range streams {
// pause serveStream
ws.resumec <- -1
// reconstruct watcher from initial request
if ws.lastRev != 0 {
ws.initReq.rev = ws.lastRev
}
if err := wc.Send(ws.initReq.toPB()); err != nil {
return err
}
// wait for request ack
resp, err := wc.Recv()
if err != nil {
return err
} else if len(resp.Events) != 0 || !resp.Created {
return fmt.Errorf("watcher: unexpected response (%+v)", resp)
}
// id may be different since new remote watcher; update map
w.mu.Lock()
delete(w.streams, ws.id)
ws.id = resp.WatchId
w.streams[ws.id] = ws
w.mu.Unlock()
// unpause serveStream
ws.resumec <- ws.lastRev
}
return nil
}
// toPB converts an internal watch request structure to its protobuf message
func (wr *watchRequest) toPB() *pb.WatchRequest {
req := &pb.WatchCreateRequest{
StartRevision: wr.rev,
Key: []byte(wr.key),
RangeEnd: []byte(wr.end),
ProgressNotify: wr.progressNotify,
}
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}