Mirror of https://github.com/rancher/os.git

Godep updates

google.golang.org/cloud
github.com/fsouza/go-dockerclient
golang.org/x/net/context
Author: Darren Shepherd
Date: 2015-04-10 09:09:47 -07:00
Parent: 2a69c5f38e
Commit: 15bd54a841
16 changed files with 3811 additions and 4 deletions

Godeps/Godeps.json (generated, 14 changed lines)

@@ -205,7 +205,7 @@
 		},
 		{
 			"ImportPath": "github.com/fsouza/go-dockerclient",
-			"Rev": "9d65f5b8a5731a788b8b13634022089a31af45dc"
+			"Rev": "fb0e9fb80f074795d7c11eba700eb33058b14bfb"
 		},
 		{
 			"ImportPath": "github.com/guelfey/go.dbus",
@@ -242,6 +242,18 @@
 			"ImportPath": "github.com/vishvananda/netlink",
 			"Rev": "ae3e7dba57271b4e976c4f91637861ee477135e2"
 		},
+		{
+			"ImportPath": "golang.org/x/net/context",
+			"Rev": "84ba27dd5b2d8135e9da1395277f2c9333a2ffda"
+		},
+		{
+			"ImportPath": "google.golang.org/cloud/compute/metadata",
+			"Rev": "c97f5f9979a8582f3ab72873a51979619801248b"
+		},
+		{
+			"ImportPath": "google.golang.org/cloud/internal",
+			"Rev": "c97f5f9979a8582f3ab72873a51979619801248b"
+		},
 		{
 			"ImportPath": "gopkg.in/yaml.v2",
 			"Rev": "a1c4bcb6c278a41992e2f4f0f29a44b4146daa5c"


@@ -25,6 +25,7 @@ Fabio Rehm <fgrehm@gmail.com>
 Fatih Arslan <ftharsln@gmail.com>
 Flavia Missi <flaviamissi@gmail.com>
 Francisco Souza <f@souza.cc>
+Guillermo Álvarez Fernández <guillermo@cientifico.net>
 Jari Kolehmainen <jari.kolehmainen@digia.com>
 Jason Wilder <jwilder@litl.com>
 Jawher Moussa <jawher.moussa@gmail.com>
@@ -35,6 +36,7 @@ Johan Euphrosine <proppy@google.com>
 Kamil Domanski <kamil@domanski.co>
 Karan Misra <kidoman@gmail.com>
 Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
+liron-l <levinlir@gmail.com>
 Lucas Clemente <lucas@clemente.io>
 Lucas Weiblen <lucasweiblen@gmail.com>
 Mantas Matelis <mmatelis@coursera.org>
@@ -52,6 +54,7 @@ Rafe Colton <rafael.colton@gmail.com>
 Rob Miller <rob@kalistra.com>
 Robert Williamson <williamson.robert@gmail.com>
 Salvador Gironès <salvadorgirones@gmail.com>
+Sam Rijs <srijs@airpost.net>
 Simon Eskildsen <sirup@sirupsen.com>
 Simon Menke <simon.menke@gmail.com>
 Skolos <skolos@gopherlab.com>


@@ -198,7 +198,7 @@ type Config struct {
 	Labels          map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
 }
 
-// LogConfig log driver type and configuration
+// LogConfig defines the log driver type and the configuration for it.
 type LogConfig struct {
 	Type   string            `json:"Type,omitempty" yaml:"Type,omitempty"`
 	Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"`
@@ -244,6 +244,8 @@ type Container struct {
 	VolumesRW       map[string]bool   `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"`
 	HostConfig      *HostConfig       `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
 	ExecIDs         []string          `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"`
+
+	AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"`
 }
 
 // RenameContainerOptions specify parameters to the RenameContainer function.
@@ -413,6 +415,8 @@ type HostConfig struct {
 	RestartPolicy   RestartPolicy     `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
 	Devices         []Device          `json:"Devices,omitempty" yaml:"Devices,omitempty"`
 	LogConfig       LogConfig         `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"`
+	ReadonlyRootfs  bool              `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
+	SecurityOpt     []string          `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"`
 }
 
 // StartContainer starts a container, returning an error in case of failure.
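The hunks above add AppArmorProfile to Container and ReadonlyRootfs/SecurityOpt to HostConfig. A minimal sketch of how a caller might exercise the new fields (not part of this commit; it assumes a Docker daemon on the default Unix socket, a locally available busybox image, and an illustrative SecurityOpt value):

package main

import (
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// Create a throwaway container from a local image.
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name:   "securityopt-example",
		Config: &docker.Config{Image: "busybox", Cmd: []string{"true"}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// ReadonlyRootfs and SecurityOpt are the HostConfig fields added above;
	// the daemon must be recent enough to understand them.
	hostConfig := &docker.HostConfig{
		ReadonlyRootfs: true,
		SecurityOpt:    []string{"label:disable"},
	}
	if err := client.StartContainer(container.ID, hostConfig); err != nil {
		log.Fatal(err)
	}
	// The new Container.AppArmorProfile field is populated on inspect when
	// the daemon reports an AppArmor profile for the container.
	inspected, err := client.InspectContainer(container.ID)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("AppArmor profile: %q", inspected.AppArmorProfile)
}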


@@ -155,6 +155,7 @@ func TestListContainersFailure(t *testing.T) {
 func TestInspectContainer(t *testing.T) {
 	jsonContainer := `{
              "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
+             "AppArmorProfile": "Profile",
              "Created": "2013-05-07T14:51:42.087658+02:00",
              "Path": "date",
              "Args": [],
@@ -176,7 +177,10 @@ func TestInspectContainer(t *testing.T) {
              ],
              "Image": "base",
              "Volumes": {},
-             "VolumesFrom": ""
+             "VolumesFrom": "",
+             "SecurityOpt": [
+                 "label:user:USER"
+             ]
      },
      "State": {
              "Running": false,


@@ -252,7 +252,7 @@ type PullImageOptions struct {
 	RawJSONStream bool      `qs:"-"`
 }
 
-// PullImage pulls an image from a remote registry, logging progress to w.
+// PullImage pulls an image from a remote registry, logging progress to opts.OutputStream.
 //
 // See http://goo.gl/ACyYNS for more details.
 func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error {
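For reference, a minimal sketch of the call the reworded comment describes, streaming pull progress to opts.OutputStream (not part of this commit; the daemon endpoint and image name are illustrative):

package main

import (
	"log"
	"os"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	opts := docker.PullImageOptions{
		Repository:   "busybox",
		Tag:          "latest",
		OutputStream: os.Stdout, // progress messages are written here
	}
	// An empty AuthConfiguration is enough for public images.
	if err := client.PullImage(opts, docker.AuthConfiguration{}); err != nil {
		log.Fatal(err)
	}
}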


@@ -168,6 +168,11 @@ func (s *DockerServer) ResetFailure(id string) {
 	delete(s.failures, id)
 }
 
+// ResetMultiFailures removes all enqueued failures.
+func (s *DockerServer) ResetMultiFailures() {
+	s.multiFailures = []map[string]string{}
+}
+
 // CustomHandler registers a custom handler for a specific path.
 //
 // For example:


@@ -1165,6 +1165,21 @@ func TestRemoveFailure(t *testing.T) {
 	}
 }
 
+func TestResetMultiFailures(t *testing.T) {
+	server := DockerServer{multiFailures: []map[string]string{}}
+	server.buildMuxer()
+	errorID := "multi error"
+	server.PrepareMultiFailures(errorID, "containers/json")
+	server.PrepareMultiFailures(errorID, "containers/json")
+	if len(server.multiFailures) != 2 {
+		t.Errorf("PrepareMultiFailures: error adding multi failures.")
+	}
+	server.ResetMultiFailures()
+	if len(server.multiFailures) != 0 {
+		t.Errorf("ResetMultiFailures: error resetting multi failures.")
+	}
+}
+
 func TestMutateContainer(t *testing.T) {
 	server := DockerServer{failures: make(map[string]string)}
 	server.buildMuxer()


@@ -0,0 +1,447 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out <-chan Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, &c)
return &c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) cancelCtx {
return cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return &c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}
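A minimal usage sketch of the cancelation machinery in the file above (not part of this commit; the worker function, channel, and timings are illustrative):

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// worker produces values until its Context is canceled.
func worker(ctx context.Context, out chan<- int) {
	for i := 0; ; i++ {
		select {
		case <-ctx.Done():
			fmt.Println("worker stopped:", ctx.Err()) // "context canceled"
			return
		case out <- i:
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	out := make(chan int)
	go worker(ctx, out)
	for v := range out {
		if v == 5 {
			cancel() // closes ctx.Done(); the worker observes it and returns
			break
		}
	}
	time.Sleep(50 * time.Millisecond) // give the worker a moment to print before exiting
}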


@@ -0,0 +1,575 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context
import (
"fmt"
"math/rand"
"runtime"
"strings"
"sync"
"testing"
"time"
)
// otherContext is a Context that's not one of the types defined in context.go.
// This lets us test code paths that differ based on the underlying type of the
// Context.
type otherContext struct {
Context
}
func TestBackground(t *testing.T) {
c := Background()
if c == nil {
t.Fatalf("Background returned nil")
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
if got, want := fmt.Sprint(c), "context.Background"; got != want {
t.Errorf("Background().String() = %q want %q", got, want)
}
}
func TestTODO(t *testing.T) {
c := TODO()
if c == nil {
t.Fatalf("TODO returned nil")
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
if got, want := fmt.Sprint(c), "context.TODO"; got != want {
t.Errorf("TODO().String() = %q want %q", got, want)
}
}
func TestWithCancel(t *testing.T) {
c1, cancel := WithCancel(Background())
if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
t.Errorf("c1.String() = %q want %q", got, want)
}
o := otherContext{c1}
c2, _ := WithCancel(o)
contexts := []Context{c1, o, c2}
for i, c := range contexts {
if d := c.Done(); d == nil {
t.Errorf("c[%d].Done() == %v want non-nil", i, d)
}
if e := c.Err(); e != nil {
t.Errorf("c[%d].Err() == %v want nil", i, e)
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
}
cancel()
time.Sleep(100 * time.Millisecond) // let cancelation propagate
for i, c := range contexts {
select {
case <-c.Done():
default:
t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
}
if e := c.Err(); e != Canceled {
t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
}
}
}
func TestParentFinishesChild(t *testing.T) {
// Context tree:
// parent -> cancelChild
// parent -> valueChild -> timerChild
parent, cancel := WithCancel(Background())
cancelChild, stop := WithCancel(parent)
defer stop()
valueChild := WithValue(parent, "key", "value")
timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
defer stop()
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
case x := <-cancelChild.Done():
t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
case x := <-timerChild.Done():
t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
case x := <-valueChild.Done():
t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
default:
}
// The parent's children should contain the two cancelable children.
pc := parent.(*cancelCtx)
cc := cancelChild.(*cancelCtx)
tc := timerChild.(*timerCtx)
pc.mu.Lock()
if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
t.Errorf("bad linkage: pc.children = %v, want %v and %v",
pc.children, cc, tc)
}
pc.mu.Unlock()
if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
}
if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
}
cancel()
pc.mu.Lock()
if len(pc.children) != 0 {
t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
}
pc.mu.Unlock()
// parent and children should all be finished.
check := func(ctx Context, name string) {
select {
case <-ctx.Done():
default:
t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
}
if e := ctx.Err(); e != Canceled {
t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
}
}
check(parent, "parent")
check(cancelChild, "cancelChild")
check(valueChild, "valueChild")
check(timerChild, "timerChild")
// WithCancel should return a canceled context on a canceled parent.
precanceledChild := WithValue(parent, "key", "value")
select {
case <-precanceledChild.Done():
default:
t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
}
if e := precanceledChild.Err(); e != Canceled {
t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
}
}
func TestChildFinishesFirst(t *testing.T) {
cancelable, stop := WithCancel(Background())
defer stop()
for _, parent := range []Context{Background(), cancelable} {
child, cancel := WithCancel(parent)
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
case x := <-child.Done():
t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
default:
}
cc := child.(*cancelCtx)
pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
}
if pcok {
pc.mu.Lock()
if len(pc.children) != 1 || !pc.children[cc] {
t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
}
pc.mu.Unlock()
}
cancel()
if pcok {
pc.mu.Lock()
if len(pc.children) != 0 {
t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
}
pc.mu.Unlock()
}
// child should be finished.
select {
case <-child.Done():
default:
t.Errorf("<-child.Done() blocked, but shouldn't have")
}
if e := child.Err(); e != Canceled {
t.Errorf("child.Err() == %v want %v", e, Canceled)
}
// parent should not be finished.
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
default:
}
if e := parent.Err(); e != nil {
t.Errorf("parent.Err() == %v want nil", e)
}
}
}
func testDeadline(c Context, wait time.Duration, t *testing.T) {
select {
case <-time.After(wait):
t.Fatalf("context should have timed out")
case <-c.Done():
}
if e := c.Err(); e != DeadlineExceeded {
t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
}
}
func TestDeadline(t *testing.T) {
c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
t.Errorf("c.String() = %q want prefix %q", got, prefix)
}
testDeadline(c, 200*time.Millisecond, t)
c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
o := otherContext{c}
testDeadline(o, 200*time.Millisecond, t)
c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
o = otherContext{c}
c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
testDeadline(c, 200*time.Millisecond, t)
}
func TestTimeout(t *testing.T) {
c, _ := WithTimeout(Background(), 100*time.Millisecond)
if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
t.Errorf("c.String() = %q want prefix %q", got, prefix)
}
testDeadline(c, 200*time.Millisecond, t)
c, _ = WithTimeout(Background(), 100*time.Millisecond)
o := otherContext{c}
testDeadline(o, 200*time.Millisecond, t)
c, _ = WithTimeout(Background(), 100*time.Millisecond)
o = otherContext{c}
c, _ = WithTimeout(o, 300*time.Millisecond)
testDeadline(c, 200*time.Millisecond, t)
}
func TestCanceledTimeout(t *testing.T) {
c, _ := WithTimeout(Background(), 200*time.Millisecond)
o := otherContext{c}
c, cancel := WithTimeout(o, 400*time.Millisecond)
cancel()
time.Sleep(100 * time.Millisecond) // let cancelation propagate
select {
case <-c.Done():
default:
t.Errorf("<-c.Done() blocked, but shouldn't have")
}
if e := c.Err(); e != Canceled {
t.Errorf("c.Err() == %v want %v", e, Canceled)
}
}
type key1 int
type key2 int
var k1 = key1(1)
var k2 = key2(1) // same int as k1, different type
var k3 = key2(3) // same type as k2, different int
func TestValues(t *testing.T) {
check := func(c Context, nm, v1, v2, v3 string) {
if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
}
if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
}
if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
}
}
c0 := Background()
check(c0, "c0", "", "", "")
c1 := WithValue(Background(), k1, "c1k1")
check(c1, "c1", "c1k1", "", "")
if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
t.Errorf("c.String() = %q want %q", got, want)
}
c2 := WithValue(c1, k2, "c2k2")
check(c2, "c2", "c1k1", "c2k2", "")
c3 := WithValue(c2, k3, "c3k3")
check(c3, "c2", "c1k1", "c2k2", "c3k3")
c4 := WithValue(c3, k1, nil)
check(c4, "c4", "", "c2k2", "c3k3")
o0 := otherContext{Background()}
check(o0, "o0", "", "", "")
o1 := otherContext{WithValue(Background(), k1, "c1k1")}
check(o1, "o1", "c1k1", "", "")
o2 := WithValue(o1, k2, "o2k2")
check(o2, "o2", "c1k1", "o2k2", "")
o3 := otherContext{c4}
check(o3, "o3", "", "c2k2", "c3k3")
o4 := WithValue(o3, k3, nil)
check(o4, "o4", "", "c2k2", "")
}
func TestAllocs(t *testing.T) {
bg := Background()
for _, test := range []struct {
desc string
f func()
limit float64
gccgoLimit float64
}{
{
desc: "Background()",
f: func() { Background() },
limit: 0,
gccgoLimit: 0,
},
{
desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
f: func() {
c := WithValue(bg, k1, nil)
c.Value(k1)
},
limit: 3,
gccgoLimit: 3,
},
{
desc: "WithTimeout(bg, 15*time.Millisecond)",
f: func() {
c, _ := WithTimeout(bg, 15*time.Millisecond)
<-c.Done()
},
limit: 8,
gccgoLimit: 13,
},
{
desc: "WithCancel(bg)",
f: func() {
c, cancel := WithCancel(bg)
cancel()
<-c.Done()
},
limit: 5,
gccgoLimit: 8,
},
{
desc: "WithTimeout(bg, 100*time.Millisecond)",
f: func() {
c, cancel := WithTimeout(bg, 100*time.Millisecond)
cancel()
<-c.Done()
},
limit: 8,
gccgoLimit: 25,
},
} {
limit := test.limit
if runtime.Compiler == "gccgo" {
// gccgo does not yet do escape analysis.
// TODO(iant): Remove this when gccgo does do escape analysis.
limit = test.gccgoLimit
}
if n := testing.AllocsPerRun(100, test.f); n > limit {
t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
}
}
}
func TestSimultaneousCancels(t *testing.T) {
root, cancel := WithCancel(Background())
m := map[Context]CancelFunc{root: cancel}
q := []Context{root}
// Create a tree of contexts.
for len(q) != 0 && len(m) < 100 {
parent := q[0]
q = q[1:]
for i := 0; i < 4; i++ {
ctx, cancel := WithCancel(parent)
m[ctx] = cancel
q = append(q, ctx)
}
}
// Start all the cancels in a random order.
var wg sync.WaitGroup
wg.Add(len(m))
for _, cancel := range m {
go func(cancel CancelFunc) {
cancel()
wg.Done()
}(cancel)
}
// Wait on all the contexts in a random order.
for ctx := range m {
select {
case <-ctx.Done():
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
}
}
// Wait for all the cancel functions to return.
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-done:
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
}
}
func TestInterlockedCancels(t *testing.T) {
parent, cancelParent := WithCancel(Background())
child, cancelChild := WithCancel(parent)
go func() {
parent.Done()
cancelChild()
}()
cancelParent()
select {
case <-child.Done():
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
}
}
func TestLayersCancel(t *testing.T) {
testLayers(t, time.Now().UnixNano(), false)
}
func TestLayersTimeout(t *testing.T) {
testLayers(t, time.Now().UnixNano(), true)
}
func testLayers(t *testing.T, seed int64, testTimeout bool) {
rand.Seed(seed)
errorf := func(format string, a ...interface{}) {
t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
}
const (
timeout = 200 * time.Millisecond
minLayers = 30
)
type value int
var (
vals []*value
cancels []CancelFunc
numTimers int
ctx = Background()
)
for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
switch rand.Intn(3) {
case 0:
v := new(value)
ctx = WithValue(ctx, v, v)
vals = append(vals, v)
case 1:
var cancel CancelFunc
ctx, cancel = WithCancel(ctx)
cancels = append(cancels, cancel)
case 2:
var cancel CancelFunc
ctx, cancel = WithTimeout(ctx, timeout)
cancels = append(cancels, cancel)
numTimers++
}
}
checkValues := func(when string) {
for _, key := range vals {
if val := ctx.Value(key).(*value); key != val {
errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
}
}
}
select {
case <-ctx.Done():
errorf("ctx should not be canceled yet")
default:
}
if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
}
t.Log(ctx)
checkValues("before cancel")
if testTimeout {
select {
case <-ctx.Done():
case <-time.After(timeout + timeout/10):
errorf("ctx should have timed out")
}
checkValues("after timeout")
} else {
cancel := cancels[rand.Intn(len(cancels))]
cancel()
select {
case <-ctx.Done():
default:
errorf("ctx should be canceled")
}
checkValues("after cancel")
}
}
func TestCancelRemoves(t *testing.T) {
checkChildren := func(when string, ctx Context, want int) {
if got := len(ctx.(*cancelCtx).children); got != want {
t.Errorf("%s: context has %d children, want %d", when, got, want)
}
}
ctx, _ := WithCancel(Background())
checkChildren("after creation", ctx, 0)
_, cancel := WithCancel(ctx)
checkChildren("with WithCancel child ", ctx, 1)
cancel()
checkChildren("after cancelling WithCancel child", ctx, 0)
ctx, _ = WithCancel(Background())
checkChildren("after creation", ctx, 0)
_, cancel = WithTimeout(ctx, 60*time.Minute)
checkChildren("with WithTimeout child ", ctx, 1)
cancel()
checkChildren("after cancelling WithTimeout child", ctx, 0)
}


@@ -0,0 +1,26 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context_test
import (
"fmt"
"time"
"golang.org/x/net/context"
)
func ExampleWithTimeout() {
// Pass a context with a timeout to tell a blocking function that it
// should abandon its work after the timeout elapses.
ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
select {
case <-time.After(200 * time.Millisecond):
fmt.Println("overslept")
case <-ctx.Done():
fmt.Println(ctx.Err()) // prints "context deadline exceeded"
}
// Output:
// context deadline exceeded
}


@@ -0,0 +1,37 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.3
package metadata
import (
"net"
"time"
)
// This is a workaround for https://github.com/golang/oauth2/issues/70, where
// net.Dialer.KeepAlive is unavailable on Go 1.2 (which App Engine as of
// Jan 2015 still runs).
//
// TODO(bradfitz,jbd,adg): remove this once App Engine supports Go
// 1.3+.
func init() {
go13Dialer = func() *net.Dialer {
return &net.Dialer{
Timeout: 750 * time.Millisecond,
KeepAlive: 30 * time.Second,
}
}
}


@@ -0,0 +1,267 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"strings"
"sync"
"time"
"google.golang.org/cloud/internal"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var metaClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Dial: dialer().Dial,
ResponseHeaderTimeout: 750 * time.Millisecond,
},
},
}
// go13Dialer is nil until we're using Go 1.3+.
// This is a workaround for https://github.com/golang/oauth2/issues/70, where
// net.Dialer.KeepAlive is unavailable on Go 1.2 (which App Engine as of
// Jan 2015 still runs).
//
// TODO(bradfitz,jbd,adg,dsymonds): remove this once App Engine supports Go
// 1.3+ and go-app-builder also supports 1.3+, or when Go 1.2 is no longer an
// option on App Engine.
var go13Dialer func() *net.Dialer
func dialer() *net.Dialer {
if fn := go13Dialer; fn != nil {
return fn()
}
return &net.Dialer{
Timeout: 750 * time.Millisecond,
}
}
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://metadata/computeMetadata/v1/".
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
url := "http://169.254.169.254/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
res, err := metaClient.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
return string(all), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
} else {
v, err = Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var onGCE struct {
sync.Mutex
set bool
v bool
}
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
defer onGCE.Unlock()
onGCE.Lock()
if onGCE.set {
return onGCE.v
}
onGCE.set = true
// We use the DNS name of the metadata service here instead of the IP address
// because we expect that to fail faster in the not-on-GCE case.
res, err := metaClient.Get("http://metadata.google.internal")
if err != nil {
return false
}
onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
return onGCE.v
}
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will probably be of
// the form "INSTANCENAME.c.PROJECT.internal" but that isn't
// guaranteed.
//
// TODO: what is this defined to be? Docs say "The host name of the
// instance."
func Hostname() (string, error) {
return getTrimmed("network-interfaces/0/ip")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
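A minimal sketch of how this package might be consumed (not part of this commit; the "user-data" attribute name is only an example and is not defined by the package, and the lookup is only meaningful on a Compute Engine VM):

package main

import (
	"fmt"
	"log"

	"google.golang.org/cloud/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server; off GCE there is nothing to query.
	if !metadata.OnGCE() {
		log.Println("not running on Google Compute Engine")
		return
	}
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", project)

	// Undefined attributes come back as a NotDefinedError, not as "".
	value, err := metadata.InstanceAttributeValue("user-data")
	switch err.(type) {
	case nil:
		fmt.Println("user-data:", value)
	case metadata.NotDefinedError:
		fmt.Println("no user-data attribute set on this instance")
	default:
		log.Fatal(err)
	}
}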


@@ -0,0 +1,128 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal provides support for the cloud packages.
//
// Users should not import this package directly.
package internal
import (
"fmt"
"net/http"
"sync"
"golang.org/x/net/context"
)
type contextKey struct{}
func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
if c == nil {
panic("nil *http.Client passed to WithContext")
}
if projID == "" {
panic("empty project ID passed to WithContext")
}
return context.WithValue(parent, contextKey{}, &cloudContext{
ProjectID: projID,
HTTPClient: c,
})
}
const userAgent = "gcloud-golang/0.1"
type cloudContext struct {
ProjectID string
HTTPClient *http.Client
mu sync.Mutex // guards svc
svc map[string]interface{} // e.g. "storage" => *rawStorage.Service
}
// Service returns the result of the fill function if it's never been
// called before for the given name (which is assumed to be an API
// service name, like "datastore"). If it has already been cached, the fill
// func is not run.
// It's safe for concurrent use by multiple goroutines.
func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {
return cc(ctx).service(name, fill)
}
func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {
c.mu.Lock()
defer c.mu.Unlock()
if c.svc == nil {
c.svc = make(map[string]interface{})
} else if v, ok := c.svc[name]; ok {
return v
}
v := fill(c.HTTPClient)
c.svc[name] = v
return v
}
// Transport is an http.RoundTripper that appends
// Google Cloud client's user-agent to the original
// request's user-agent header.
type Transport struct {
// Base represents the actual http.RoundTripper
// the requests will be delegated to.
Base http.RoundTripper
}
// RoundTrip appends a user-agent to the existing user-agent
// header and delegates the request to the base http.RoundTripper.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
req = cloneRequest(req)
ua := req.Header.Get("User-Agent")
if ua == "" {
ua = userAgent
} else {
ua = fmt.Sprintf("%s %s", ua, userAgent)
}
req.Header.Set("User-Agent", ua)
return t.Base.RoundTrip(req)
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
func ProjID(ctx context.Context) string {
return cc(ctx).ProjectID
}
func HTTPClient(ctx context.Context) *http.Client {
return cc(ctx).HTTPClient
}
// cc returns the internal *cloudContext (cc) state for a context.Context.
// It panics if the user did it wrong.
func cc(ctx context.Context) *cloudContext {
if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
return c
}
panic("invalid context.Context type; it should be created with cloud.NewContext")
}
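Although the package comment says users should not import this package directly, a small sketch (not part of this commit) can illustrate what Transport.RoundTrip does to the User-Agent header; the test server and the "example-caller" value are illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"

	"google.golang.org/cloud/internal"
)

func main() {
	// A throwaway server that echoes back the User-Agent it received.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.Header.Get("User-Agent"))
	}))
	defer srv.Close()

	client := &http.Client{Transport: &internal.Transport{Base: http.DefaultTransport}}
	req, err := http.NewRequest("GET", srv.URL, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("User-Agent", "example-caller")

	res, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	// Prints "example-caller gcloud-golang/0.1": RoundTrip cloned the request
	// and appended the library's user-agent before delegating to Base.
	fmt.Println(string(body))
}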

File diff suppressed because it is too large.


@@ -0,0 +1,594 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// The datastore v1 service proto definitions
syntax = "proto2";
package datastore;
option java_package = "com.google.api.services.datastore";
// An identifier for a particular subset of entities.
//
// Entities are partitioned into various subsets, each used by different
// datasets and different namespaces within a dataset and so forth.
//
// All input partition IDs are normalized before use.
// A partition ID is normalized as follows:
// If the partition ID is unset or is set to an empty partition ID, replace it
// with the context partition ID.
// Otherwise, if the partition ID has no dataset ID, assign it the context
// partition ID's dataset ID.
// Unless otherwise documented, the context partition ID has the dataset ID set
// to the context dataset ID and no other partition dimension set.
//
// A partition ID is empty if all of its fields are unset.
//
// Partition dimension:
// A dimension may be unset.
// A dimension's value must never be "".
// A dimension's value must match [A-Za-z\d\.\-_]{1,100}
// If the value of any dimension matches regex "__.*__",
// the partition is reserved/read-only.
// A reserved/read-only partition ID is forbidden in certain documented contexts.
//
// Dataset ID:
// A dataset id's value must never be "".
// A dataset id's value must match
// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99}
message PartitionId {
// The dataset ID.
optional string dataset_id = 3;
// The namespace.
optional string namespace = 4;
}
// A unique identifier for an entity.
// If a key's partition id or any of its path kinds or names are
// reserved/read-only, the key is reserved/read-only.
// A reserved/read-only key is forbidden in certain documented contexts.
message Key {
// Entities are partitioned into subsets, currently identified by a dataset
// (usually implicitly specified by the project) and namespace ID.
// Queries are scoped to a single partition.
optional PartitionId partition_id = 1;
// A (kind, ID/name) pair used to construct a key path.
//
// At most one of name or ID may be set.
// If either is set, the element is complete.
// If neither is set, the element is incomplete.
message PathElement {
// The kind of the entity.
// A kind matching regex "__.*__" is reserved/read-only.
// A kind must not contain more than 500 characters.
// Cannot be "".
required string kind = 1;
// The ID of the entity.
// Never equal to zero. Values less than zero are discouraged and will not
// be supported in the future.
optional int64 id = 2;
// The name of the entity.
// A name matching regex "__.*__" is reserved/read-only.
// A name must not be more than 500 characters.
// Cannot be "".
optional string name = 3;
}
// The entity path.
// An entity path consists of one or more elements composed of a kind and a
// string or numerical identifier, which identify entities. The first
// element identifies a <em>root entity</em>, the second element identifies
// a <em>child</em> of the root entity, the third element a child of the
// second entity, and so forth. The entities identified by all prefixes of
// the path are called the element's <em>ancestors</em>.
// An entity path is always fully complete: ALL of the entity's ancestors
// are required to be in the path along with the entity identifier itself.
// The only exception is that in some documented cases, the identifier in the
// last path element (for the entity) itself may be omitted. A path can never
// be empty.
repeated PathElement path_element = 2;
}
// A message that can hold any of the supported value types and associated
// metadata.
//
// At most one of the <type>Value fields may be set.
// If none are set the value is "null".
//
message Value {
// A boolean value.
optional bool boolean_value = 1;
// An integer value.
optional int64 integer_value = 2;
// A double value.
optional double double_value = 3;
// A timestamp value.
optional int64 timestamp_microseconds_value = 4;
// A key value.
optional Key key_value = 5;
// A blob key value.
optional string blob_key_value = 16;
// A UTF-8 encoded string value.
optional string string_value = 17;
// A blob value.
optional bytes blob_value = 18;
// An entity value.
// May have no key.
// May have a key with an incomplete key path.
// May have a reserved/read-only key.
optional Entity entity_value = 6;
// A list value.
// Cannot contain another list value.
// Cannot also have a meaning and indexing set.
repeated Value list_value = 7;
// The <code>meaning</code> field is reserved and should not be used.
optional int32 meaning = 14;
// If the value should be indexed.
//
// The <code>indexed</code> property may be set for a
// <code>null</code> value.
// When <code>indexed</code> is <code>true</code>, <code>stringValue</code>
// is limited to 500 characters and the blob value is limited to 500 bytes.
// Exception: If meaning is set to 2, string_value is limited to 2038
// characters regardless of indexed.
// When indexed is true, meaning 15 and 22 are not allowed, and meaning 16
// will be ignored on input (and will never be set on output).
// Input values by default have <code>indexed</code> set to
// <code>true</code>; however, you can explicitly set <code>indexed</code> to
// <code>true</code> if you want. (An output value never has
// <code>indexed</code> explicitly set to <code>true</code>.) If a value is
// itself an entity, it cannot have <code>indexed</code> set to
// <code>true</code>.
// Exception: An entity value with meaning 9, 20 or 21 may be indexed.
optional bool indexed = 15 [default = true];
}
// An entity property.
message Property {
// The name of the property.
// A property name matching regex "__.*__" is reserved.
// A reserved property name is forbidden in certain documented contexts.
// The name must not contain more than 500 characters.
// Cannot be "".
required string name = 1;
// The value(s) of the property.
// Each value can have only one value property populated. For example,
// you cannot have a values list of <code>{ value: { integerValue: 22,
// stringValue: "a" } }</code>, but you can have <code>{ value: { listValue:
// [ { integerValue: 22 }, { stringValue: "a" } ] }</code>.
required Value value = 4;
}
// An entity.
//
// An entity is limited to 1 megabyte when stored. That <em>roughly</em>
// corresponds to a limit of 1 megabyte for the serialized form of this
// message.
message Entity {
// The entity's key.
//
// An entity must have a key, unless otherwise documented (for example,
// an entity in <code>Value.entityValue</code> may have no key).
// An entity's kind is its key's path's last element's kind,
// or null if it has no key.
optional Key key = 1;
// The entity's properties.
// Each property's name must be unique for its entity.
repeated Property property = 2;
}
// The result of fetching an entity from the datastore.
message EntityResult {
// Specifies what data the 'entity' field contains.
// A ResultType is either implied (for example, in LookupResponse.found it
// is always FULL) or specified by context (for example, in message
// QueryResultBatch, field 'entity_result_type' specifies a ResultType
// for all the values in field 'entity_result').
enum ResultType {
FULL = 1; // The entire entity.
PROJECTION = 2; // A projected subset of properties.
// The entity may have no key.
// A property value may have meaning 18.
KEY_ONLY = 3; // Only the key.
}
// The resulting entity.
required Entity entity = 1;
}
// A query.
message Query {
// The projection to return. If not set the entire entity is returned.
repeated PropertyExpression projection = 2;
// The kinds to query (if empty, returns entities from all kinds).
repeated KindExpression kind = 3;
// The filter to apply (optional).
optional Filter filter = 4;
// The order to apply to the query results (if empty, order is unspecified).
repeated PropertyOrder order = 5;
// The properties to group by (if empty, no grouping is applied to the
// result set).
repeated PropertyReference group_by = 6;
// A starting point for the query results. Optional. Query cursors are
// returned in query result batches.
optional bytes /* serialized QueryCursor */ start_cursor = 7;
// An ending point for the query results. Optional. Query cursors are
// returned in query result batches.
optional bytes /* serialized QueryCursor */ end_cursor = 8;
// The number of results to skip. Applies before limit, but after all other
// constraints (optional, defaults to 0).
optional int32 offset = 10 [default=0];
// The maximum number of results to return. Applies after all other
// constraints. Optional.
optional int32 limit = 11;
}
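// Illustrative sketch, in protocol buffer text format, of a Query for up to
// 10 entities of a hypothetical kind "Task", newest first by a hypothetical
// "created" property:
//
//   kind { name: "Task" }
//   order {
//     property { name: "created" }
//     direction: DESCENDING
//   }
//   limit: 10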
// A representation of a kind.
message KindExpression {
// The name of the kind.
required string name = 1;
}
// A reference to a property relative to the kind expressions.
message PropertyReference {
// The name of the property.
required string name = 2;
}
// A representation of a property in a projection.
message PropertyExpression {
enum AggregationFunction {
FIRST = 1;
}
// The property to project.
required PropertyReference property = 1;
// The aggregation function to apply to the property. Optional.
// Can only be used when grouping by at least one property. Must
// then be set on all properties in the projection that are not
// being grouped by.
optional AggregationFunction aggregation_function = 2;
}
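// Illustrative sketch: projecting a hypothetical "priority" property while
// grouping by a hypothetical "done" property would combine, on the enclosing
// Query,
//
//   projection {
//     property { name: "priority" }
//     aggregation_function: FIRST
//   }
//   group_by { name: "done" }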
// The desired order for a specific property.
message PropertyOrder {
enum Direction {
ASCENDING = 1;
DESCENDING = 2;
}
// The property to order by.
required PropertyReference property = 1;
// The direction to order by.
optional Direction direction = 2 [default=ASCENDING];
}
// A holder for any type of filter. Exactly one field should be specified.
message Filter {
// A composite filter.
optional CompositeFilter composite_filter = 1;
// A filter on a property.
optional PropertyFilter property_filter = 2;
}
// A filter that merges the multiple other filters using the given operation.
message CompositeFilter {
enum Operator {
AND = 1;
}
// The operator for combining multiple filters.
required Operator operator = 1;
// The list of filters to combine.
// Must contain at least one filter.
repeated Filter filter = 2;
}
// A filter on a specific property.
message PropertyFilter {
enum Operator {
LESS_THAN = 1;
LESS_THAN_OR_EQUAL = 2;
GREATER_THAN = 3;
GREATER_THAN_OR_EQUAL = 4;
EQUAL = 5;
HAS_ANCESTOR = 11;
}
// The property to filter by.
required PropertyReference property = 1;
// The operator to filter by.
required Operator operator = 2;
// The value to compare the property to.
required Value value = 3;
}
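// Illustrative sketch, in text format, of a filter requiring a hypothetical
// "done" property to be false and a hypothetical "priority" property to be
// at least 4 (this assumes the Value message defined above names its fields
// boolean_value and integer_value):
//
//   composite_filter {
//     operator: AND
//     filter {
//       property_filter {
//         property { name: "done" }
//         operator: EQUAL
//         value { boolean_value: false }
//       }
//     }
//     filter {
//       property_filter {
//         property { name: "priority" }
//         operator: GREATER_THAN_OR_EQUAL
//         value { integer_value: 4 }
//       }
//     }
//   }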
// A GQL query.
message GqlQuery {
required string query_string = 1;
// When false, the query string must not contain a literal.
optional bool allow_literal = 2 [default = false];
// A named argument must set field GqlQueryArg.name.
// No two named arguments may have the same name.
// For each non-reserved named binding site in the query string,
// there must be a named argument with that name,
// but not necessarily the inverse.
repeated GqlQueryArg name_arg = 3;
// Numbered binding site @1 references the first numbered argument,
// effectively using 1-based indexing, rather than the usual 0.
// A numbered argument must NOT set field GqlQueryArg.name.
// For each binding site numbered i in query_string,
// there must be an ith numbered argument.
// The inverse must also be true.
repeated GqlQueryArg number_arg = 4;
}
// A binding argument for a GQL query.
// Exactly one of fields value and cursor must be set.
message GqlQueryArg {
// Must match regex "[A-Za-z_$][A-Za-z_$0-9]*".
// Must not match regex "__.*__".
// Must not be "".
optional string name = 1;
optional Value value = 2;
optional bytes cursor = 3;
}
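// Illustrative sketch, in text format, of a GqlQuery with one named argument
// (the kind "Task", the property "done" and the boolean_value field of Value
// are hypothetical/assumed):
//
//   query_string: "SELECT * FROM Task WHERE done = @is_done"
//   name_arg {
//     name: "is_done"
//     value { boolean_value: false }
//   }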
// A batch of results produced by a query.
message QueryResultBatch {
// The possible values for the 'more_results' field.
enum MoreResultsType {
NOT_FINISHED = 1; // There are additional batches to fetch from this query.
MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more
// results after the limit.
NO_MORE_RESULTS = 3; // The query has been exhausted.
}
// The result type for every entity in entityResults.
required EntityResult.ResultType entity_result_type = 1;
// The results for this batch.
repeated EntityResult entity_result = 2;
// A cursor that points to the position after the last result in the batch.
// May be absent.
optional bytes /* serialized QueryCursor */ end_cursor = 4;
// The state of the query after the current batch.
required MoreResultsType more_results = 5;
// The number of results skipped because of <code>Query.offset</code>.
optional int32 skipped_results = 6;
}
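// A common pagination pattern: while more_results is NOT_FINISHED, re-run the
// same Query with Query.start_cursor set to this batch's end_cursor, and stop
// once more_results is NO_MORE_RESULTS or MORE_RESULTS_AFTER_LIMIT.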
// A set of changes to apply.
//
// No entity in this message may have a reserved property name,
// not even a property in an entity in a value.
// No value in this message may have meaning 18,
// not even a value in an entity in another value.
//
// If entities with duplicate keys are present, an arbitrary choice will
// be made as to which is written.
message Mutation {
// Entities to upsert.
// Each upserted entity's key must have a complete path and
// must not be reserved/read-only.
repeated Entity upsert = 1;
// Entities to update.
// Each updated entity's key must have a complete path and
// must not be reserved/read-only.
repeated Entity update = 2;
// Entities to insert.
// Each inserted entity's key must have a complete path and
// must not be reserved/read-only.
repeated Entity insert = 3;
// Insert entities with a newly allocated ID.
// Each inserted entity's key must omit the final identifier in its path and
// must not be reserved/read-only.
repeated Entity insert_auto_id = 4;
// Keys of entities to delete.
// Each key must have a complete key path and must not be reserved/read-only.
repeated Key delete = 5;
// Ignore a user specified read-only period. Optional.
optional bool force = 6;
}
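// Illustrative sketch, in text format, of a Mutation that upserts one entity
// and deletes another (the kind "Task" and the IDs are hypothetical; this
// assumes the Key message defined above has a repeated path_element field
// with kind and id):
//
//   upsert {
//     key { path_element { kind: "Task" id: 1 } }
//     property {
//       name: "done"
//       value { boolean_value: true }
//     }
//   }
//   delete { path_element { kind: "Task" id: 2 } }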
// The result of applying a mutation.
message MutationResult {
// Number of index writes.
required int32 index_updates = 1;
// Keys for <code>insertAutoId</code> entities. One per entity from the
// request, in the same order.
repeated Key insert_auto_id_key = 2;
}
// Options shared by read requests.
message ReadOptions {
enum ReadConsistency {
DEFAULT = 0;
STRONG = 1;
EVENTUAL = 2;
}
// The read consistency to use.
// Cannot be set when transaction is set.
// Lookup and ancestor queries default to STRONG, global queries default to
// EVENTUAL and cannot be set to STRONG.
optional ReadConsistency read_consistency = 1 [default=DEFAULT];
// The transaction to use. Optional.
optional bytes /* serialized Transaction */ transaction = 2;
}
// The request for Lookup.
message LookupRequest {
// Options for this lookup request. Optional.
optional ReadOptions read_options = 1;
// Keys of entities to look up from the datastore.
repeated Key key = 3;
}
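// Illustrative sketch, in text format, of an eventually consistent
// LookupRequest for a single hypothetical "Task" entity (same Key field
// assumptions as above):
//
//   read_options { read_consistency: EVENTUAL }
//   key { path_element { kind: "Task" id: 1 } }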
// The response for Lookup.
message LookupResponse {
// The order of results in these fields is undefined and has no relation to
// the order of the keys in the input.
// Entities found as ResultType.FULL entities.
repeated EntityResult found = 1;
// Entities not found as ResultType.KEY_ONLY entities.
repeated EntityResult missing = 2;
// A list of keys that were not looked up due to resource constraints.
repeated Key deferred = 3;
}
// The request for RunQuery.
message RunQueryRequest {
// The options for this query.
optional ReadOptions read_options = 1;
// Entities are partitioned into subsets, identified by a dataset (usually
// implicitly specified by the project) and namespace ID. Queries are scoped
// to a single partition.
// This partition ID is normalized with the standard default context
// partition ID, but all other partition IDs in RunQueryRequest are
// normalized with this partition ID as the context partition ID.
optional PartitionId partition_id = 2;
// The query to run.
// Either this field or field gql_query must be set, but not both.
optional Query query = 3;
// The GQL query to run.
// Either this field or field query must be set, but not both.
optional GqlQuery gql_query = 7;
}
// The response for RunQuery.
message RunQueryResponse {
// A batch of query results (always present).
optional QueryResultBatch batch = 1;
}
// The request for BeginTransaction.
message BeginTransactionRequest {
enum IsolationLevel {
SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions
// conflict if their mutations conflict. For example:
// Read(A),Write(B) may not conflict with Read(B),Write(A),
// but Read(B),Write(B) does conflict with Read(B),Write(B).
SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent
// transactions conflict if they cannot be serialized.
// For example Read(A),Write(B) does conflict with
// Read(B),Write(A) but Read(A) may not conflict with
// Write(A).
}
// The transaction isolation level.
optional IsolationLevel isolation_level = 1 [default=SNAPSHOT];
}
// The response for BeginTransaction.
message BeginTransactionResponse {
// The transaction identifier (always present).
optional bytes /* serialized Transaction */ transaction = 1;
}
// The request for Rollback.
message RollbackRequest {
// The transaction identifier, returned by a call to
// <code>beginTransaction</code>.
required bytes /* serialized Transaction */ transaction = 1;
}
// The response for Rollback.
message RollbackResponse {
// Empty
}
// The request for Commit.
message CommitRequest {
enum Mode {
TRANSACTIONAL = 1;
NON_TRANSACTIONAL = 2;
}
// The transaction identifier, returned by a call to
// <code>beginTransaction</code>. Must be set when mode is TRANSACTIONAL.
optional bytes /* serialized Transaction */ transaction = 1;
// The mutation to perform. Optional.
optional Mutation mutation = 2;
// The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL.
optional Mode mode = 5 [default=TRANSACTIONAL];
}
// The response for Commit.
message CommitResponse {
// The result of performing the mutation (if any).
optional MutationResult mutation_result = 1;
}
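// A typical transactional round trip with these messages: BeginTransaction,
// then reads with ReadOptions.transaction set to the returned transaction
// bytes, then a single Commit with mode TRANSACTIONAL, the same transaction
// bytes, and the desired Mutation (or Rollback to abort).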
// The request for AllocateIds.
message AllocateIdsRequest {
// A list of keys with incomplete key paths to allocate IDs for.
// No key may be reserved/read-only.
repeated Key key = 1;
}
// The response for AllocateIds.
message AllocateIdsResponse {
// The keys specified in the request (in the same order), each with
// its key path completed with a newly allocated ID.
repeated Key key = 1;
}
// Each rpc normalizes the partition IDs of the keys in its input entities,
// and always returns entities with keys with normalized partition IDs.
// (Note that this applies to all entities, including entities in values.)
service DatastoreService {
// Look up some entities by key.
rpc Lookup(LookupRequest) returns (LookupResponse) {
};
// Query for entities.
rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
};
// Begin a new transaction.
rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
};
// Commit a transaction, optionally creating, deleting or modifying some
// entities.
rpc Commit(CommitRequest) returns (CommitResponse) {
};
// Roll back a transaction.
rpc Rollback(RollbackRequest) returns (RollbackResponse) {
};
// Allocate IDs for incomplete keys (useful for referencing an entity before
// it is inserted).
rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
};
}

View File

@@ -0,0 +1,57 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package testutil contains helper functions for writing tests.
package testutil

import (
	"io/ioutil"
	"log"
	"net/http"
	"os"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/cloud"
)

const (
	envProjID     = "GCLOUD_TESTS_GOLANG_PROJECT_ID"
	envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY"
)

// Context returns a cloud.Context for the project named by the
// GCLOUD_TESTS_GOLANG_PROJECT_ID environment variable, authenticated with the
// JSON key file named by GCLOUD_TESTS_GOLANG_KEY and scoped to the given
// OAuth2 scopes. It calls log.Fatal if either variable is unset or the key
// cannot be read or parsed.
func Context(scopes ...string) context.Context {
	key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID)
	if key == "" || projID == "" {
		log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
	}
	jsonKey, err := ioutil.ReadFile(key)
	if err != nil {
		log.Fatalf("Cannot read the JSON key file, err: %v", err)
	}
	conf, err := google.JWTConfigFromJSON(jsonKey, scopes...)
	if err != nil {
		log.Fatal(err)
	}
	return cloud.NewContext(projID, conf.Client(oauth2.NoContext))
}

// NoAuthContext returns a cloud.Context for the project named by the
// GCLOUD_TESTS_GOLANG_PROJECT_ID environment variable, using an
// unauthenticated HTTP client. It calls log.Fatal if the variable is unset.
func NoAuthContext() context.Context {
	projID := os.Getenv(envProjID)
	if projID == "" {
		log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
	}
	return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport})
}