vendor: Add GCP dependencies and update crypto

Signed-off-by: Dave Tucker <dt@docker.com>
Dave Tucker 2017-04-05 01:25:47 +01:00
parent db10280f5f
commit df340fd559
61 changed files with 72671 additions and 3276 deletions


@@ -1,9 +1,10 @@
cloud.google.com/go/storage 0bee953ffbf6fda3be3f56f1e9578f16a1e978a4
github.com/google/google-api-go-client 16ab375f94503bfa0d19db78e96bffbe1a34354f
github.com/Masterminds/semver 312afcd0e81e5cf81fdc3cfd0e8504ae031521c8
github.com/Masterminds/sprig 01a849f546a584d7b29bfee253e7db0aed44f7ba
github.com/Sirupsen/logrus 10f801ebc38b33738c9d17d50860f484a0988ff5
github.com/aokoli/goutils 9c37978a95bd5c709a15883b6242714ea6709e64
github.com/armon/go-radix 4239b77079c7b5d1243b7b4736304ce8ddb6f0f2
github.com/docker/docker 8d96619e5a367798cffcb740cfc41e0a505a5232
github.com/docker/distribution 07f32ac1831ed0fc71960b7da5d6bb83cb6881b5
github.com/docker/engine-api cf82c64276ebc2501e72b241f9fdc1e21e421743
github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5
@@ -29,7 +30,7 @@ github.com/spf13/cobra 7be4beda01ec05d0b93d80b3facd2b6f44080d94
github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
github.com/surma/gocpio fcb68777e7dc4ea43ffce871b552c0d073c17495
github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1
golang.org/x/crypto 459e26527287adbc2adcc5d0d49abff9a5f315a7
golang.org/x/crypto 573951cbe80bb6352881271bb276f48749eab6f4
golang.org/x/net/context a6577fac2d73be281a500b310739095313165611
golang.org/x/oauth2 1611bb46e67abc64a71ecc5c3ae67f1cbbc2b921
golang.org/x/sys 99f16d856c9836c42d24e7ab64ea72916925fa97


@@ -1,94 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package optional provides versions of primitive types that can
// be nil. These are useful in methods that update some of an API object's
// fields.
package optional
import (
"fmt"
"strings"
)
type (
// Bool is either a bool or nil.
Bool interface{}
// String is either a string or nil.
String interface{}
// Int is either an int or nil.
Int interface{}
// Uint is either a uint or nil.
Uint interface{}
// Float64 is either a float64 or nil.
Float64 interface{}
)
// ToBool returns its argument as a bool.
// It panics if its argument is nil or not a bool.
func ToBool(v Bool) bool {
x, ok := v.(bool)
if !ok {
doPanic("Bool", v)
}
return x
}
// ToString returns its argument as a string.
// It panics if its argument is nil or not a string.
func ToString(v String) string {
x, ok := v.(string)
if !ok {
doPanic("String", v)
}
return x
}
// ToInt returns its argument as an int.
// It panics if its argument is nil or not an int.
func ToInt(v Int) int {
x, ok := v.(int)
if !ok {
doPanic("Int", v)
}
return x
}
// ToUint returns its argument as a uint.
// It panics if its argument is nil or not a uint.
func ToUint(v Uint) uint {
x, ok := v.(uint)
if !ok {
doPanic("Uint", v)
}
return x
}
// ToFloat64 returns its argument as a float64.
// It panics if its argument is nil or not a float64.
func ToFloat64(v Float64) float64 {
x, ok := v.(float64)
if !ok {
doPanic("Float64", v)
}
return x
}
func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}
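The conversion helpers above panic on a nil or mistyped value, so callers check for nil before converting. A minimal sketch of that pattern, written as it might appear in an example test for this package; the bucketUpdate type is hypothetical, not part of the vendored source:

```go
package optional

import "fmt"

// bucketUpdate is a hypothetical update-style request in which a nil
// field means "leave this setting unchanged".
type bucketUpdate struct {
	// VersioningEnabled is either a bool or nil.
	VersioningEnabled Bool
}

func ExampleToBool() {
	upd := bucketUpdate{VersioningEnabled: true}
	// Only convert when the optional value was actually provided;
	// ToBool panics on nil.
	if upd.VersioningEnabled != nil {
		fmt.Println("set versioning to", ToBool(upd.VersioningEnabled))
	}
	// Output: set versioning to true
}
```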


@@ -1,223 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"fmt"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)
// ACLRole is the level of access to grant.
type ACLRole string
const (
RoleOwner ACLRole = "OWNER"
RoleReader ACLRole = "READER"
RoleWriter ACLRole = "WRITER"
)
// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string
const (
AllUsers ACLEntity = "allUsers"
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
type ACLRule struct {
Entity ACLEntity
Role ACLRole
}
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
type ACLHandle struct {
c *Client
bucket string
object string
isDefault bool
}
// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
if a.object != "" {
return a.objectDelete(ctx, entity)
}
if a.isDefault {
return a.bucketDefaultDelete(ctx, entity)
}
return a.bucketDelete(ctx, entity)
}
// Set sets the permission level for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
if a.object != "" {
return a.objectSet(ctx, entity, role)
}
if a.isDefault {
return a.bucketDefaultSet(ctx, entity, role)
}
return a.bucketSet(ctx, entity, role)
}
// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
if a.object != "" {
return a.objectList(ctx)
}
if a.isDefault {
return a.bucketDefaultList(ctx)
}
return a.bucketList(ctx)
}
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
err = runWithRetry(ctx, func() error {
acls, err = a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do()
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
}
return toACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
acl := &raw.ObjectAccessControl{
Bucket: a.bucket,
Entity: string(entity),
Role: string(role),
}
err := runWithRetry(ctx, func() error {
_, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
return err
})
if err != nil {
return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
}
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
}
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.BucketAccessControls
var err error
err = runWithRetry(ctx, func() error {
acls, err = a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do()
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
}
r := make([]ACLRule, len(acls.Items))
for i, v := range acls.Items {
r[i].Entity = ACLEntity(v.Entity)
r[i].Role = ACLRole(v.Role)
}
return r, nil
}
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
acl := &raw.BucketAccessControl{
Bucket: a.bucket,
Entity: string(entity),
Role: string(role),
}
err := runWithRetry(ctx, func() error {
_, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
return err
})
if err != nil {
return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
err = runWithRetry(ctx, func() error {
acls, err = a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx).Do()
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
}
return toACLRules(acls.Items), nil
}
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
acl := &raw.ObjectAccessControl{
Bucket: a.bucket,
Entity: string(entity),
Role: string(role),
}
err := runWithRetry(ctx, func() error {
_, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do()
return err
})
if err != nil {
return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
}
return nil
}
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx).Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
}
return nil
}
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
r := make([]ACLRule, 0, len(items))
for _, item := range items {
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
}
return r
}
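For reference, a minimal sketch of how the exported client API drives this ACL handle, assuming a client can be created with application default credentials; the bucket and object names are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // assumes default credentials are available
	if err != nil {
		log.Fatal(err)
	}
	obj := client.Bucket("my-bucket").Object("report.txt") // hypothetical names

	// Grant read access to all users, then list the resulting rules.
	if err := obj.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}
	rules, err := obj.ACL().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		fmt.Printf("%s: %s\n", r.Entity, r.Role)
	}
}
```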


@@ -1,331 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"net/http"
"time"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"
)
// Create creates the Bucket in the project.
// If attrs is nil the API defaults will be used.
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
var bkt *raw.Bucket
if attrs != nil {
bkt = attrs.toRawBucket()
} else {
bkt = &raw.Bucket{}
}
bkt.Name = b.name
req := b.c.raw.Buckets.Insert(projectID, bkt)
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
}
// Delete deletes the Bucket.
func (b *BucketHandle) Delete(ctx context.Context) error {
req := b.c.raw.Buckets.Delete(b.name)
return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
}
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
// This controls who can list, create or overwrite the objects in a bucket.
// This call does not perform any network operations.
func (b *BucketHandle) ACL() *ACLHandle {
return &b.acl
}
// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
// This call does not perform any network operations.
func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
return &b.defaultObjectACL
}
// Object returns an ObjectHandle, which provides operations on the named object.
// This call does not perform any network operations.
//
// name must consist entirely of valid UTF-8-encoded runes. The full specification
// for valid object names can be found at:
// https://cloud.google.com/storage/docs/bucket-naming
func (b *BucketHandle) Object(name string) *ObjectHandle {
return &ObjectHandle{
c: b.c,
bucket: b.name,
object: name,
acl: ACLHandle{
c: b.c,
bucket: b.name,
object: name,
},
gen: -1,
}
}
// Attrs returns the metadata for the bucket.
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
var resp *raw.Bucket
var err error
err = runWithRetry(ctx, func() error {
resp, err = b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do()
return err
})
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
return nil, ErrBucketNotExist
}
if err != nil {
return nil, err
}
return newBucket(resp), nil
}
// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
type BucketAttrs struct {
// Name is the name of the bucket.
Name string
// ACL is the list of access control rules on the bucket.
ACL []ACLRule
// DefaultObjectACL is the list of access controls to
// apply to new objects when no object ACL is provided.
DefaultObjectACL []ACLRule
// Location is the location of the bucket. It defaults to "US".
Location string
// MetaGeneration is the metadata generation of the bucket.
MetaGeneration int64
// StorageClass is the default storage class of the bucket. This defines
// how objects in the bucket are stored and determines the SLA
// and the cost of storage. Typical values are "MULTI_REGIONAL",
// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
// the bucket's location settings.
StorageClass string
// Created is the creation time of the bucket.
Created time.Time
// VersioningEnabled reports whether this bucket has versioning enabled.
// This field is read-only.
VersioningEnabled bool
}
func newBucket(b *raw.Bucket) *BucketAttrs {
if b == nil {
return nil
}
bucket := &BucketAttrs{
Name: b.Name,
Location: b.Location,
MetaGeneration: b.Metageneration,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
}
acl := make([]ACLRule, len(b.Acl))
for i, rule := range b.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
bucket.ACL = acl
objACL := make([]ACLRule, len(b.DefaultObjectAcl))
for i, rule := range b.DefaultObjectAcl {
objACL[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
bucket.DefaultObjectACL = objACL
return bucket
}
// toRawBucket copies the editable attributes from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
var acl []*raw.BucketAccessControl
if len(b.ACL) > 0 {
acl = make([]*raw.BucketAccessControl, len(b.ACL))
for i, rule := range b.ACL {
acl[i] = &raw.BucketAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
dACL := toRawObjectACL(b.DefaultObjectACL)
return &raw.Bucket{
Name: b.Name,
DefaultObjectAcl: dACL,
Location: b.Location,
StorageClass: b.StorageClass,
Acl: acl,
}
}
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
it := &ObjectIterator{
ctx: ctx,
bucket: b,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
if q != nil {
it.query = *q
}
return it
}
// An ObjectIterator is an iterator over ObjectAttrs.
type ObjectIterator struct {
ctx context.Context
bucket *BucketHandle
query Query
pageInfo *iterator.PageInfo
nextFunc func() error
items []*ObjectAttrs
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
// have a non-empty Prefix field, and a zero value for all other fields. These
// represent prefixes.
func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
item := it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
req := it.bucket.c.raw.Objects.List(it.bucket.name)
req.Projection("full")
req.Delimiter(it.query.Delimiter)
req.Prefix(it.query.Prefix)
req.Versions(it.query.Versions)
req.PageToken(pageToken)
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
var resp *raw.Objects
var err error
err = runWithRetry(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
})
if err != nil {
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
err = ErrBucketNotExist
}
return "", err
}
for _, item := range resp.Items {
it.items = append(it.items, newObject(item))
}
for _, prefix := range resp.Prefixes {
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
}
return resp.NextPageToken, nil
}
// TODO(jbd): Add storage.buckets.update.
// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
// are returned.
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
it := &BucketIterator{
ctx: ctx,
client: c,
projectID: projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.buckets) },
func() interface{} { b := it.buckets; it.buckets = nil; return b })
return it
}
// A BucketIterator is an iterator over BucketAttrs.
type BucketIterator struct {
// Prefix restricts the iterator to buckets whose names begin with it.
Prefix string
ctx context.Context
client *Client
projectID string
buckets []*BucketAttrs
pageInfo *iterator.PageInfo
nextFunc func() error
}
// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
func (it *BucketIterator) Next() (*BucketAttrs, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
b := it.buckets[0]
it.buckets = it.buckets[1:]
return b, nil
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
req := it.client.raw.Buckets.List(it.projectID)
req.Projection("full")
req.Prefix(it.Prefix)
req.PageToken(pageToken)
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
var resp *raw.Buckets
var err error
err = runWithRetry(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
})
if err != nil {
return "", err
}
for _, item := range resp.Items {
it.buckets = append(it.buckets, newBucket(item))
}
return resp.NextPageToken, nil
}
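A minimal sketch of consuming the ObjectIterator defined above, assuming default credentials; the bucket name and prefix are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // assumes default credentials are available
	if err != nil {
		log.Fatal(err)
	}
	bkt := client.Bucket("my-bucket") // hypothetical bucket name

	// List every object under a prefix; Next returns iterator.Done when exhausted.
	it := bkt.Objects(ctx, &storage.Query{Prefix: "logs/"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name, attrs.Size)
	}
}
```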


@@ -1,190 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package storage contains a Google Cloud Storage client.
//
// This package is experimental and may make backwards-incompatible changes.
package storage
import (
"errors"
"fmt"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)
// CopierFrom creates a Copier that can copy src to dst.
// You can immediately call Run on the returned Copier, or
// you can configure it first.
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
return &Copier{dst: dst, src: src}
}
// A Copier copies a source object to a destination.
type Copier struct {
// ObjectAttrs are optional attributes to set on the destination object.
// Any attributes must be initialized before any calls on the Copier. Nil
// or zero-valued attributes are ignored.
ObjectAttrs
// RewriteToken can be set before calling Run to resume a copy
// operation. After Run returns a non-nil error, RewriteToken will
// have been updated to contain the value needed to resume the copy.
RewriteToken string
// ProgressFunc can be used to monitor the progress of a multi-RPC copy
// operation. If ProgressFunc is not nil and CopyFrom requires multiple
// calls to the underlying service (see
// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
// ProgressFunc will be invoked after each call with the number of bytes of
// content copied so far and the total size in bytes of the source object.
//
// ProgressFunc is intended to make upload progress available to the
// application. For example, the implementation of ProgressFunc may update
// a progress bar in the application's UI, or log the result of
// float64(copiedBytes)/float64(totalBytes).
//
// ProgressFunc should return quickly without blocking.
ProgressFunc func(copiedBytes, totalBytes uint64)
dst, src *ObjectHandle
}
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
if err := c.src.validate(); err != nil {
return nil, err
}
if err := c.dst.validate(); err != nil {
return nil, err
}
// Convert destination attributes to raw form, omitting the bucket.
// If the bucket is included but name or content-type aren't, the service
// returns a 400 with "Required" as the only message. Omitting the bucket
// does not cause any problems.
rawObject := c.ObjectAttrs.toRawObject("")
for {
res, err := c.callRewrite(ctx, c.src, rawObject)
if err != nil {
return nil, err
}
if c.ProgressFunc != nil {
c.ProgressFunc(res.TotalBytesRewritten, res.ObjectSize)
}
if res.Done { // Finished successfully.
return newObject(res.Resource), nil
}
}
return nil, nil
}
func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw.Object) (*raw.RewriteResponse, error) {
call := c.dst.c.raw.Objects.Rewrite(src.bucket, src.object, c.dst.bucket, c.dst.object, rawObj)
call.Context(ctx).Projection("full")
if c.RewriteToken != "" {
call.RewriteToken(c.RewriteToken)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
return nil, err
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
return nil, err
}
var res *raw.RewriteResponse
var err error
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
if err != nil {
return nil, err
}
c.RewriteToken = res.RewriteToken
return res, nil
}
// ComposerFrom creates a Composer that can compose srcs into dst.
// You can immediately call Run on the returned Composer, or you can
// configure it first.
//
// The encryption key for the destination object will be used to decrypt all
// source objects and encrypt the destination object. It is an error
// to specify an encryption key for any of the source objects.
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
return &Composer{dst: dst, srcs: srcs}
}
// A Composer composes source objects into a destination object.
type Composer struct {
// ObjectAttrs are optional attributes to set on the destination object.
// Any attributes must be initialized before any calls on the Composer. Nil
// or zero-valued attributes are ignored.
ObjectAttrs
dst *ObjectHandle
srcs []*ObjectHandle
}
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
if err := c.dst.validate(); err != nil {
return nil, err
}
if len(c.srcs) == 0 {
return nil, errors.New("storage: at least one source object must be specified")
}
req := &raw.ComposeRequest{}
// Compose requires a non-empty Destination, so we always set it,
// even if the caller-provided ObjectAttrs is the zero value.
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
for _, src := range c.srcs {
if err := src.validate(); err != nil {
return nil, err
}
if src.bucket != c.dst.bucket {
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
}
if src.encryptionKey != nil {
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
}
srcObj := &raw.ComposeRequestSourceObjects{
Name: src.object,
}
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
return nil, err
}
req.SourceObjects = append(req.SourceObjects, srcObj)
}
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
var err error
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if err != nil {
return nil, err
}
return newObject(obj), nil
}
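A minimal sketch of driving the Copier from caller code, assuming default credentials; the bucket and object names and the chosen content type are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // assumes default credentials are available
	if err != nil {
		log.Fatal(err)
	}
	bkt := client.Bucket("my-bucket") // hypothetical bucket name
	src := bkt.Object("archive/2017-04.tar")
	dst := bkt.Object("backup/2017-04.tar")

	copier := dst.CopierFrom(src)
	copier.ContentType = "application/x-tar" // optional destination attribute
	copier.ProgressFunc = func(copied, total uint64) {
		// Invoked after each rewrite call when the copy needs multiple RPCs.
		fmt.Printf("copied %d of %d bytes\n", copied, total)
	}
	attrs, err := copier.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new object:", attrs.Name)
}
```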


@@ -1,161 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package storage provides an easy way to work with Google Cloud Storage.
Google Cloud Storage stores data in named objects, which are grouped into buckets.
More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.
All of the methods of this package use exponential backoff to retry calls
that fail with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff.
Note: This package is in beta. Some backwards-incompatible changes may occur.
Creating a Client
To start working with this package, create a client:
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
Buckets
A Google Cloud Storage bucket is a collection of objects. To work with a
bucket, make a bucket handle:
bkt := client.Bucket(bucketName)
A handle is a reference to a bucket. You can have a handle even if the
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
call Create on the handle:
if err := bkt.Create(ctx, projectID, nil); err != nil {
// TODO: Handle error.
}
Note that although buckets are associated with projects, bucket names are
global across all projects.
Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:
attrs, err := bkt.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
Objects
An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets. You can use the
standard Go io.Reader and io.Writer interfaces to read and write
object data:
obj := bkt.Object("data")
// Write something to obj.
// w implements io.Writer.
w := obj.NewWriter(ctx)
// Write some text to obj. This will overwrite whatever is there.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error.
}
// Close, just like writing a file.
if err := w.Close(); err != nil {
// TODO: Handle error.
}
// Read it back.
r, err := obj.NewReader(ctx)
if err != nil {
// TODO: Handle error.
}
defer r.Close()
if _, err := io.Copy(os.Stdout, r); err != nil {
// TODO: Handle error.
}
// Prints "This object contains text."
Objects also have attributes, which you can fetch with Attrs:
objAttrs, err := obj.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("object %s has size %d and can be read using %s\n",
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
ACLs
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
ACLRules, each of which specifies the role of a user, group or project. ACLs
are suitable for fine-grained control, but you may prefer using IAM to control
access at the project level (see
https://cloud.google.com/storage/docs/access-control/iam).
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
acls, err := obj.ACL().List(ctx)
if err != nil {
// TODO: Handle error.
}
for _, rule := range acls {
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
}
You can also set and delete ACLs.
Conditions
Every object has a generation and a metageneration. The generation changes
whenever the content changes, and the metageneration changes whenever the
metadata changes. Conditions let you check these values before an operation;
the operation only executes if the conditions match. You can use conditions to
prevent race conditions in read-modify-write operations.
For example, say you've read an object's metadata into objAttrs. Now
you want to write to that object, but only if its contents haven't changed
since you read it. Here is how to express that:
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
// Proceed with writing as above.
Signed URLs
You can obtain a URL that lets anyone read or write an object for a limited time.
You don't need to create a client to do this. See the documentation of
SignedURL for details.
url, err := storage.SignedURL(bucketName, "shared-object", opts)
if err != nil {
// TODO: Handle error.
}
fmt.Println(url)
Authentication
See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package storage // import "cloud.google.com/go/storage"


@@ -1,43 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
)
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
err = call()
if err == nil {
return true, nil
}
e, ok := err.(*googleapi.Error)
if !ok {
return true, err
}
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
return false, nil
}
return true, err
})
}
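A standalone restatement of the retry predicate used above, which makes it easy to see which status codes are treated as transient; the retryable helper is illustrative, not part of the vendored source:

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

// retryable reports whether err is one of the transient googleapi errors
// (HTTP 429 or any 5xx) that the retry loop keeps retrying; anything else
// stops the loop immediately.
func retryable(err error) bool {
	e, ok := err.(*googleapi.Error)
	if !ok {
		return false
	}
	return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
}

func main() {
	fmt.Println(retryable(&googleapi.Error{Code: 503})) // true: server error, retried
	fmt.Println(retryable(&googleapi.Error{Code: 404})) // false: returned to the caller
}
```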


@@ -1,57 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"io"
)
// Reader reads a Cloud Storage object.
// It implements io.Reader.
type Reader struct {
body io.ReadCloser
remain, size int64
contentType string
}
// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
return r.body.Close()
}
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.body.Read(p)
if r.remain != -1 {
r.remain -= int64(n)
}
return n, err
}
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
func (r *Reader) Size() int64 {
return r.size
}
// Remain returns the number of bytes left to read, or -1 if unknown.
func (r *Reader) Remain() int64 {
return r.remain
}
// ContentType returns the content type of the object.
func (r *Reader) ContentType() string {
return r.contentType
}
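A minimal sketch of reading an object through this Reader (NewReader itself lives in the package's main file, whose diff is suppressed below), assuming default credentials; the bucket and object names are hypothetical:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // assumes default credentials are available
	if err != nil {
		log.Fatal(err)
	}
	r, err := client.Bucket("my-bucket").Object("data").NewReader(ctx) // hypothetical names
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close() // Close must be called when done reading

	fmt.Println("size:", r.Size(), "content type:", r.ContentType())
	// Reader implements io.Reader, so it composes with the standard library.
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```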

File diff suppressed because it is too large.


@@ -1,150 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"errors"
"fmt"
"io"
"unicode/utf8"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
// A Writer writes a Cloud Storage object.
type Writer struct {
// ObjectAttrs are optional attributes to set on the object. Any attributes
// must be initialized before the first Write call. Nil or zero-valued
// attributes are ignored.
ObjectAttrs
// ChunkSize controls the maximum number of bytes of the object that the
// Writer will attempt to send to the server in a single request. Objects
// smaller than the size will be sent in a single request, while larger
// objects will be split over multiple requests. The size will be rounded up
// to the nearest multiple of 256K. If zero, chunking will be disabled and
// the object will be uploaded in a single request.
//
// ChunkSize will default to a reasonable value. Any custom configuration
// must be done before the first Write call.
ChunkSize int
ctx context.Context
o *ObjectHandle
opened bool
pw *io.PipeWriter
donec chan struct{} // closed after err and obj are set.
err error
obj *ObjectAttrs
}
func (w *Writer) open() error {
attrs := w.ObjectAttrs
// Check the developer didn't change the object Name (this is unfortunate, but
// we don't want to store an object under the wrong name).
if attrs.Name != w.o.object {
return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
}
if !utf8.ValidString(attrs.Name) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
}
pr, pw := io.Pipe()
w.pw = pw
w.opened = true
if w.ChunkSize < 0 {
return errors.New("storage: Writer.ChunkSize must be non-negative")
}
mediaOpts := []googleapi.MediaOption{
googleapi.ChunkSize(w.ChunkSize),
}
if c := attrs.ContentType; c != "" {
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
}
go func() {
defer close(w.donec)
call := w.o.c.raw.Objects.Insert(w.o.bucket, attrs.toRawObject(w.o.bucket)).
Media(pr, mediaOpts...).
Projection("full").
Context(w.ctx)
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.err = err
pr.CloseWithError(w.err)
return
}
var resp *raw.Object
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
if err == nil {
resp, err = call.Do()
}
if err != nil {
w.err = err
pr.CloseWithError(w.err)
return
}
w.obj = newObject(resp)
}()
return nil
}
// Write appends to w. It implements the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, err error) {
if w.err != nil {
return 0, w.err
}
if !w.opened {
if err := w.open(); err != nil {
return 0, err
}
}
return w.pw.Write(p)
}
// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
if !w.opened {
if err := w.open(); err != nil {
return err
}
}
if err := w.pw.Close(); err != nil {
return err
}
<-w.donec
return w.err
}
// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
func (w *Writer) CloseWithError(err error) error {
if !w.opened {
return nil
}
return w.pw.CloseWithError(err)
}
// Attrs returns metadata about a successfully-written object.
// It's only valid to call it after Close returns nil.
func (w *Writer) Attrs() *ObjectAttrs {
return w.obj
}
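A minimal sketch of a caller using this Writer, assuming default credentials; the bucket and object names, content type, and chunk size are illustrative choices:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // assumes default credentials are available
	if err != nil {
		log.Fatal(err)
	}
	w := client.Bucket("my-bucket").Object("notes.txt").NewWriter(ctx) // hypothetical names
	w.ContentType = "text/plain" // attributes must be set before the first Write
	w.ChunkSize = 256 * 1024     // smallest effective chunk; 0 disables chunking

	if _, err := fmt.Fprintln(w, "hello from the storage writer"); err != nil {
		log.Fatal(err)
	}
	// Close flushes buffered data; Attrs is only valid after Close returns nil.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote", w.Attrs().Size, "bytes")
}
```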

vendor/github.com/docker/docker/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/docker/docker/NOTICE generated vendored Normal file

@@ -0,0 +1,19 @@
Docker
Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

vendor/github.com/docker/docker/README.md generated vendored Normal file

@@ -0,0 +1,304 @@
Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest)
============================
Docker is an open source project to pack, ship and run any application
as a lightweight container.
Docker containers are both *hardware-agnostic* and *platform-agnostic*.
This means they can run anywhere, from your laptop to the largest
cloud compute instance and everything in between - and they don't require
you to use a particular language, framework or packaging system. That
makes them great building blocks for deploying and scaling web apps,
databases, and backend services without depending on a particular stack
or provider.
Docker began as an open-source implementation of the deployment engine which
powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/),
a popular Platform-as-a-Service. It benefits directly from the experience
accumulated over several years of large-scale operation and support of hundreds
of thousands of applications and databases.
![Docker logo](docs/static_files/docker-logo-compressed.png "Docker")
## Security Disclosure
Security is very important to us. If you have any issue regarding security,
please disclose the information responsibly by sending an email to
security@docker.com and not by creating a GitHub issue.
## Better than VMs
A common method for distributing applications and sandboxing their
execution is to use virtual machines, or VMs. Typical VM formats are
VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory
these formats should allow every developer to automatically package
their application into a "machine" for easy distribution and deployment.
In practice, that almost never happens, for a few reasons:
* *Size*: VMs are very large which makes them impractical to store
and transfer.
* *Performance*: running VMs consumes significant CPU and memory,
which makes them impractical in many scenarios, for example local
development of multi-tier applications, and large-scale deployment
of cpu and memory-intensive applications on large numbers of
machines.
* *Portability*: competing VM environments don't play well with each
other. Although conversion tools do exist, they are limited and
add even more overhead.
* *Hardware-centric*: VMs were designed with machine operators in
mind, not software developers. As a result, they offer very
limited tooling for what developers need most: building, testing
and running their software. For example, VMs offer no facilities
for application versioning, monitoring, configuration, logging or
service discovery.
By contrast, Docker relies on a different sandboxing method known as
*containerization*. Unlike traditional virtualization, containerization
takes place at the kernel level. Most modern operating system kernels
now support the primitives necessary for containerization, including
Linux with [openvz](https://openvz.org),
[vserver](http://linux-vserver.org) and more recently
[lxc](https://linuxcontainers.org/), Solaris with
[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc),
and FreeBSD with
[Jails](https://www.freebsd.org/doc/handbook/jails.html).
Docker builds on top of these low-level primitives to offer developers a
portable format and runtime environment that solves all four problems.
Docker containers are small (and their transfer can be optimized with
layers), they have basically zero memory and cpu overhead, they are
completely portable, and are designed from the ground up with an
application-centric design.
Perhaps best of all, because Docker operates at the OS level, it can still be
run inside a VM!
## Plays well with others
Docker does not require you to buy into a particular programming
language, framework, packaging system, or configuration language.
Is your application a Unix process? Does it use files, tcp connections,
environment variables, standard Unix streams and command-line arguments
as inputs and outputs? Then Docker can run it.
Can your application's build be expressed as a sequence of such
commands? Then Docker can build it.
## Escape dependency hell
A common problem for developers is the difficulty of managing all
their application's dependencies in a simple and automated way.
This is usually difficult for several reasons:
* *Cross-platform dependencies*. Modern applications often depend on
a combination of system libraries and binaries, language-specific
packages, framework-specific modules, internal components
developed for another project, etc. These dependencies live in
different "worlds" and require different tools - these tools
typically don't work well with each other, requiring awkward
custom integrations.
* *Conflicting dependencies*. Different applications may depend on
different versions of the same dependency. Packaging tools handle
these situations with various degrees of ease - but they all
handle them in different and incompatible ways, which again forces
the developer to do extra work.
* *Custom dependencies*. A developer may need to prepare a custom
version of their application's dependency. Some packaging systems
can handle custom versions of a dependency, others can't - and all
of them handle it differently.
Docker solves the problem of dependency hell by giving developers a simple
way to express *all* their application's dependencies in one place, while
streamlining the process of assembling them. If this makes you think of
[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't
*replace* your favorite packaging systems. It simply orchestrates
their use in a simple and repeatable way. How does it do that? With
layers.
Docker defines a build as running a sequence of Unix commands, one
after the other, in the same container. Build commands modify the
contents of the container (usually by installing new files on the
filesystem), the next command modifies it some more, etc. Since each
build command inherits the result of the previous commands, the
*order* in which the commands are executed expresses *dependencies*.
Here's a typical Docker build process:
```dockerfile
FROM ubuntu:12.04
RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```
Note that Docker doesn't care *how* dependencies are built - as long
as they can be built by running a Unix command in a container.
Getting started
===============
Docker can be installed either on your computer for building applications or
on servers for running them. To get started, [check out the installation
instructions in the
documentation](https://docs.docker.com/engine/installation/).
Usage examples
==============
Docker can be used to run short-lived commands, long-running daemons
(app servers, databases, etc.), interactive shell sessions, etc.
You can find a [list of real-world
examples](https://docs.docker.com/engine/examples/) in the
documentation.
Under the hood
--------------
Under the hood, Docker is built on the following components:
* The
[cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt)
and
[namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html)
capabilities of the Linux kernel
* The [Go](https://golang.org) programming language
* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md)
Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker)
======================
| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** |
|------------------|----------------------|---------|---------|
| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) |
Want to hack on Docker? Awesome! We have [instructions to help you get
started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/).
These instructions are probably not perfect; please let us know if anything
feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
Getting the development builds
==============================
Want to run Docker from a master build? You can download
master builds at [master.dockerproject.org](https://master.dockerproject.org).
They are updated with each commit merged into the master branch.
Don't know how to use that super cool new feature in the master build? Check
out the master docs at
[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
How the project is run
======================
Docker is a very, very active project. If you want to learn more about how it is run,
or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
We are always open to suggestions on process improvements, and are always looking for more maintainers.
### Talking to other Docker users and contributors
<table class="tg">
<col width="45%">
<col width="65%">
<tr>
<td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
<td>
<p>
IRC is a direct line to our most knowledgeable Docker users; we have
both the <code>#docker</code> and <code>#docker-dev</code> channels on
<strong>irc.freenode.net</strong>.
IRC is a rich chat protocol but it can overwhelm new users. You can search
<a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
</p>
Read our <a href="https://docs.docker.com/opensource/get-help/#/irc-quickstart" target="_blank">IRC quickstart guide</a> for an easy way to get started.
</td>
</tr>
<tr>
<td>Docker Community Forums</td>
<td>
The <a href="https://forums.docker.com/c/open-source-projects/de" target="_blank">Docker Engine</a>
group is for users of the Docker Engine project.
</td>
</tr>
<tr>
<td>Google Groups</td>
<td>
The <a href="https://groups.google.com/forum/#!forum/docker-dev"
target="_blank">docker-dev</a> group is for contributors and others working
on the Docker project. You can join this group without a
Google account by sending an email to <a
href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
You'll receive a join-request message; simply reply to the message to
confirm your subscription.
</td>
</tr>
<tr>
<td>Twitter</td>
<td>
You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
to get updates on our products. You can also tweet us questions or just
share blogs or stories.
</td>
</tr>
<tr>
<td>Stack Overflow</td>
<td>
Stack Overflow has over 7000 Docker questions listed. We regularly
monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
and so do many other knowledgeable Docker users.
</td>
</tr>
</table>
### Legal
*Brought to you courtesy of our legal counsel. For more context,
please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
Licensing
=========
Docker is licensed under the Apache License, Version 2.0. See
[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
license text.
Other Docker Related Projects
=============================
There are a number of projects under development that are based on Docker's
core technology. These projects expand the tooling built around the
Docker platform to broaden its application and utility.
* [Docker Registry](https://github.com/docker/distribution): Registry
server for Docker (hosting/delivery of repositories and images)
* [Docker Machine](https://github.com/docker/machine): Machine management
for a container-centric world
* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
system
* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
Define and run multi-container apps
* [Kitematic](https://github.com/docker/kitematic): The easiest way to use
Docker on Mac and Windows
If you know of another project underway that should be listed here, please help
us keep this list up-to-date by submitting a PR.
Awesome-Docker
==============
You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there.

11
vendor/github.com/docker/docker/pkg/README.md generated vendored Normal file
View File

@@ -0,0 +1,11 @@
pkg/ is a collection of utility packages used by the Docker project without being specific to its internals.
Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible.
If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the
Docker organization, to facilitate re-use by other projects. However, that is not the priority.
The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core
Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad!
Because utility packages are small and neatly separated from the rest of the codebase, they are a good
place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them!

66
vendor/github.com/docker/docker/pkg/term/ascii.go generated vendored Normal file
View File

@@ -0,0 +1,66 @@
package term
import (
"fmt"
"strings"
)
// ASCII lists the supported ASCII key sequences.
var ASCII = []string{
"ctrl-@",
"ctrl-a",
"ctrl-b",
"ctrl-c",
"ctrl-d",
"ctrl-e",
"ctrl-f",
"ctrl-g",
"ctrl-h",
"ctrl-i",
"ctrl-j",
"ctrl-k",
"ctrl-l",
"ctrl-m",
"ctrl-n",
"ctrl-o",
"ctrl-p",
"ctrl-q",
"ctrl-r",
"ctrl-s",
"ctrl-t",
"ctrl-u",
"ctrl-v",
"ctrl-w",
"ctrl-x",
"ctrl-y",
"ctrl-z",
"ctrl-[",
"ctrl-\\",
"ctrl-]",
"ctrl-^",
"ctrl-_",
}
// ToBytes converts a comma-separated string of key sequences to the corresponding ASCII codes.
func ToBytes(keys string) ([]byte, error) {
codes := []byte{}
next:
for _, key := range strings.Split(keys, ",") {
if len(key) != 1 {
for code, ctrl := range ASCII {
if ctrl == key {
codes = append(codes, byte(code))
continue next
}
}
if key == "DEL" {
codes = append(codes, 127)
} else {
return nil, fmt.Errorf("Unknown character: '%s'", key)
}
} else {
codes = append(codes, byte(key[0]))
}
}
return codes, nil
}

View File

@@ -0,0 +1,50 @@
// +build linux,cgo
package term
import (
"syscall"
"unsafe"
)
// #include <termios.h>
import "C"
// Termios is the Unix API for terminal I/O.
// It is a passthrough for syscall.Termios in order to make it portable with
// other platforms where it is not available or handled differently.
type Termios syscall.Termios
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if err := tcget(fd, &oldState.termios); err != 0 {
return nil, err
}
newState := oldState.termios
C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
if err := tcset(fd, &newState); err != 0 {
return nil, err
}
return &oldState, nil
}
func tcget(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}

20
vendor/github.com/docker/docker/pkg/term/tc_other.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
// +build !windows
// +build !linux !cgo
// +build !solaris !cgo
package term
import (
"syscall"
"unsafe"
)
func tcget(fd uintptr, p *Termios) syscall.Errno {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
return err
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
return err
}

View File

@@ -0,0 +1,63 @@
// +build solaris,cgo
package term
import (
"syscall"
"unsafe"
)
// #include <termios.h>
import "C"
// Termios is the Unix API for terminal I/O.
// It is a passthrough for syscall.Termios in order to make it portable with
// other platforms where it is not available or handled differently.
type Termios syscall.Termios
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if err := tcget(fd, &oldState.termios); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY)
newState.Oflag &^= syscall.OPOST
newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
newState.Cflag |= syscall.CS8
/*
VMIN is the minimum number of characters that need to be read in non-canonical mode before a read returns.
Because VMIN shares storage with another element used in canonical mode, it defaults to 4 when we switch
modes, so it needs to be explicitly set to 1.
*/
newState.Cc[C.VMIN] = 1
newState.Cc[C.VTIME] = 0
if err := tcset(fd, &newState); err != 0 {
return nil, err
}
return &oldState, nil
}
func tcget(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}

123
vendor/github.com/docker/docker/pkg/term/term.go generated vendored Normal file
View File

@@ -0,0 +1,123 @@
// +build !windows
// Package term provides structures and helper functions to work with
// terminal (state, sizes).
package term
import (
"errors"
"fmt"
"io"
"os"
"os/signal"
"syscall"
)
var (
// ErrInvalidState is returned if the state of the terminal is invalid.
ErrInvalidState = errors.New("Invalid terminal state")
)
// State represents the state of the terminal.
type State struct {
termios Termios
}
// Winsize represents the size of the terminal window.
type Winsize struct {
Height uint16
Width uint16
x uint16
y uint16
}
// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
return os.Stdin, os.Stdout, os.Stderr
}
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
var inFd uintptr
var isTerminalIn bool
if file, ok := in.(*os.File); ok {
inFd = file.Fd()
isTerminalIn = IsTerminal(inFd)
}
return inFd, isTerminalIn
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
var termios Termios
return tcget(fd, &termios) == 0
}
// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
if state == nil {
return ErrInvalidState
}
if err := tcset(fd, &state.termios); err != 0 {
return err
}
return nil
}
// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
var oldState State
if err := tcget(fd, &oldState.termios); err != 0 {
return nil, err
}
return &oldState, nil
}
// DisableEcho applies the specified state to the terminal connected to the file
// descriptor, with echo disabled.
func DisableEcho(fd uintptr, state *State) error {
newState := state.termios
newState.Lflag &^= syscall.ECHO
if err := tcset(fd, &newState); err != 0 {
return err
}
handleInterrupt(fd, state)
return nil
}
// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
oldState, err := MakeRaw(fd)
if err != nil {
return nil, err
}
handleInterrupt(fd, oldState)
return oldState, err
}
// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
return nil, nil
}
func handleInterrupt(fd uintptr, state *State) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, os.Interrupt)
go func() {
for range sigchan {
// quit cleanly and make sure the next prompt starts on a new line
fmt.Println()
signal.Stop(sigchan)
close(sigchan)
RestoreTerminal(fd, state)
os.Exit(1)
}
}()
}

View File

@@ -0,0 +1,41 @@
// +build solaris
package term
import (
"syscall"
"unsafe"
)
/*
#include <unistd.h>
#include <stropts.h>
#include <termios.h>
// Small wrapper to get rid of variadic args of ioctl()
int my_ioctl(int fd, int cmd, struct winsize *ws) {
return ioctl(fd, cmd, ws);
}
*/
import "C"
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
ws := &Winsize{}
ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
// A return value of 0 means success.
if ret == 0 {
return ws, nil
}
return ws, err
}
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
// A return value of 0 means success.
if ret == 0 {
return nil
}
return err
}

29
vendor/github.com/docker/docker/pkg/term/term_unix.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
// +build !solaris,!windows
package term
import (
"syscall"
"unsafe"
)
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
ws := &Winsize{}
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
// An errno of 0 means success.
if err == 0 {
return ws, nil
}
return ws, err
}
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
// An errno of 0 means success.
if err == 0 {
return nil
}
return err
}

View File

@@ -0,0 +1,233 @@
// +build windows
package term
import (
"io"
"os"
"os/signal"
"syscall"
"github.com/Azure/go-ansiterm/winterm"
"github.com/docker/docker/pkg/term/windows"
)
// State holds the console mode for the terminal.
type State struct {
mode uint32
}
// Winsize is used for window size.
type Winsize struct {
Height uint16
Width uint16
}
const (
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
enableVirtualTerminalInput = 0x0200
enableVirtualTerminalProcessing = 0x0004
disableNewlineAutoReturn = 0x0008
)
// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
var vtInputSupported bool
// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
// Turn on VT handling on all std handles, if possible. This might
// fail, in which case we will fall back to terminal emulation.
var emulateStdin, emulateStdout, emulateStderr bool
fd := os.Stdin.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate that enableVirtualTerminalInput is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
emulateStdin = true
} else {
vtInputSupported = true
}
// Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles.
winterm.SetConsoleMode(fd, mode)
}
fd = os.Stdout.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate disableNewlineAutoReturn is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
emulateStdout = true
} else {
winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
}
}
fd = os.Stderr.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate disableNewlineAutoReturn is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
emulateStderr = true
} else {
winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
}
}
if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" {
// The ConEmu and ConsoleZ terminals emulate ANSI on output streams well.
emulateStdin = true
emulateStdout = false
emulateStderr = false
}
if emulateStdin {
stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE)
} else {
stdIn = os.Stdin
}
if emulateStdout {
stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
} else {
stdOut = os.Stdout
}
if emulateStderr {
stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
} else {
stdErr = os.Stderr
}
return
}
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
return windows.GetHandleInfo(in)
}
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
info, err := winterm.GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil, err
}
winsize := &Winsize{
Width: uint16(info.Window.Right - info.Window.Left + 1),
Height: uint16(info.Window.Bottom - info.Window.Top + 1),
}
return winsize, nil
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
return windows.IsConsole(fd)
}
// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
return winterm.SetConsoleMode(fd, state.mode)
}
// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
mode, e := winterm.GetConsoleMode(fd)
if e != nil {
return nil, e
}
return &State{mode: mode}, nil
}
// DisableEcho disables echo for the terminal connected to the given file descriptor.
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
func DisableEcho(fd uintptr, state *State) error {
mode := state.mode
mode &^= winterm.ENABLE_ECHO_INPUT
mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
err := winterm.SetConsoleMode(fd, mode)
if err != nil {
return err
}
// Register an interrupt handler to catch and restore prior state
restoreAtInterrupt(fd, state)
return nil
}
// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
state, err := MakeRaw(fd)
if err != nil {
return nil, err
}
// Register an interrupt handler to catch and restore prior state
restoreAtInterrupt(fd, state)
return state, err
}
// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
state, err := SaveState(fd)
if err != nil {
return nil, err
}
// Ignore failures, since disableNewlineAutoReturn might not be supported on this
// version of Windows.
winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
return state, err
}
// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be restored.
func MakeRaw(fd uintptr) (*State, error) {
state, err := SaveState(fd)
if err != nil {
return nil, err
}
mode := state.mode
// See
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
// Disable these modes
mode &^= winterm.ENABLE_ECHO_INPUT
mode &^= winterm.ENABLE_LINE_INPUT
mode &^= winterm.ENABLE_MOUSE_INPUT
mode &^= winterm.ENABLE_WINDOW_INPUT
mode &^= winterm.ENABLE_PROCESSED_INPUT
// Enable these modes
mode |= winterm.ENABLE_EXTENDED_FLAGS
mode |= winterm.ENABLE_INSERT_MODE
mode |= winterm.ENABLE_QUICK_EDIT_MODE
if vtInputSupported {
mode |= enableVirtualTerminalInput
}
err = winterm.SetConsoleMode(fd, mode)
if err != nil {
return nil, err
}
return state, nil
}
func restoreAtInterrupt(fd uintptr, state *State) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, os.Interrupt)
go func() {
_ = <-sigchan
RestoreTerminal(fd, state)
os.Exit(0)
}()
}

View File

@@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint64
Oflag uint64
Cflag uint64
Lflag uint64
Cc [20]byte
Ispeed uint64
Ospeed uint64
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View File

@@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View File

@@ -0,0 +1,47 @@
// +build !cgo
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TCGETS
setTermios = syscall.TCSETS
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
newState.Oflag &^= syscall.OPOST
newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
newState.Cflag |= syscall.CS8
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View File

@@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View File

@@ -0,0 +1,263 @@
// +build windows
package windows
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strings"
"unsafe"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Azure/go-ansiterm/winterm"
)
const (
escapeSequence = ansiterm.KEY_ESC_CSI
)
// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
type ansiReader struct {
file *os.File
fd uintptr
buffer []byte
cbBuffer int
command []byte
}
// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
// Windows console input handle.
func NewAnsiReader(nFile int) io.ReadCloser {
initLogger()
file, fd := winterm.GetStdFile(nFile)
return &ansiReader{
file: file,
fd: fd,
command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
buffer: make([]byte, 0),
}
}
// Close closes the wrapped file.
func (ar *ansiReader) Close() (err error) {
return ar.file.Close()
}
// Fd returns the file descriptor of the wrapped file.
func (ar *ansiReader) Fd() uintptr {
return ar.fd
}
// Read reads up to len(p) bytes of translated input events into p.
func (ar *ansiReader) Read(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
// Previously read bytes exist, read as much as we can and return
if len(ar.buffer) > 0 {
logger.Debugf("Reading previously cached bytes")
originalLength := len(ar.buffer)
copiedLength := copy(p, ar.buffer)
if copiedLength == originalLength {
ar.buffer = make([]byte, 0, len(p))
} else {
ar.buffer = ar.buffer[copiedLength:]
}
logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
return copiedLength, nil
}
// Read and translate key events
events, err := readInputEvents(ar.fd, len(p))
if err != nil {
return 0, err
} else if len(events) == 0 {
logger.Debug("No input events detected")
return 0, nil
}
keyBytes := translateKeyEvents(events, []byte(escapeSequence))
// Save excess bytes and right-size keyBytes
if len(keyBytes) > len(p) {
logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
ar.buffer = keyBytes[len(p):]
keyBytes = keyBytes[:len(p)]
} else if len(keyBytes) == 0 {
logger.Debug("No key bytes returned from the translator")
return 0, nil
}
copiedLength := copy(p, keyBytes)
if copiedLength != len(keyBytes) {
return 0, errors.New("unexpected copy length encountered")
}
logger.Debugf("Read p[%d]: % x", copiedLength, p)
logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
return copiedLength, nil
}
// readInputEvents polls until at least one event is available.
func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
// Determine the maximum number of records to retrieve
// -- Cast around the type system to obtain the size of a single INPUT_RECORD.
// unsafe.Sizeof requires an expression vs. a type-reference; the casting
// tricks the type system into believing it has such an expression.
recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
countRecords := maxBytes / recordSize
if countRecords > ansiterm.MAX_INPUT_EVENTS {
countRecords = ansiterm.MAX_INPUT_EVENTS
} else if countRecords == 0 {
countRecords = 1
}
logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
// Wait for and read input events
events := make([]winterm.INPUT_RECORD, countRecords)
nEvents := uint32(0)
eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
if err != nil {
return nil, err
}
if eventsExist {
err = winterm.ReadConsoleInput(fd, events, &nEvents)
if err != nil {
return nil, err
}
}
// Return a slice restricted to the number of returned records
logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
return events[:nEvents], nil
}
// KeyEvent Translation Helpers
var arrowKeyMapPrefix = map[uint16]string{
winterm.VK_UP: "%s%sA",
winterm.VK_DOWN: "%s%sB",
winterm.VK_RIGHT: "%s%sC",
winterm.VK_LEFT: "%s%sD",
}
var keyMapPrefix = map[uint16]string{
winterm.VK_UP: "\x1B[%sA",
winterm.VK_DOWN: "\x1B[%sB",
winterm.VK_RIGHT: "\x1B[%sC",
winterm.VK_LEFT: "\x1B[%sD",
winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1
winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4
winterm.VK_INSERT: "\x1B[2%s~",
winterm.VK_DELETE: "\x1B[3%s~",
winterm.VK_PRIOR: "\x1B[5%s~",
winterm.VK_NEXT: "\x1B[6%s~",
winterm.VK_F1: "",
winterm.VK_F2: "",
winterm.VK_F3: "\x1B[13%s~",
winterm.VK_F4: "\x1B[14%s~",
winterm.VK_F5: "\x1B[15%s~",
winterm.VK_F6: "\x1B[17%s~",
winterm.VK_F7: "\x1B[18%s~",
winterm.VK_F8: "\x1B[19%s~",
winterm.VK_F9: "\x1B[20%s~",
winterm.VK_F10: "\x1B[21%s~",
winterm.VK_F11: "\x1B[23%s~",
winterm.VK_F12: "\x1B[24%s~",
}
// translateKeyEvents converts the input events into the appropriate ANSI string.
func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
var buffer bytes.Buffer
for _, event := range events {
if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
}
}
return buffer.Bytes()
}
// keyToString maps the given input event record to the corresponding string.
func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
if keyEvent.UnicodeChar == 0 {
return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
}
_, alt, control := getControlKeys(keyEvent.ControlKeyState)
if control {
// TODO(azlinux): Implement following control sequences
// <Ctrl>-D Signals the end of input from the keyboard; also exits current shell.
// <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key.
// <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s.
// <Ctrl>-S Suspends printing on the screen (does not stop the program).
// <Ctrl>-U Deletes all characters on the current line. Also called the KILL key.
// <Ctrl>-E Quits current command and creates a core
}
// <Alt>+Key generates ESC N Key
if !control && alt {
return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
}
return string(keyEvent.UnicodeChar)
}
// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
shift, alt, control := getControlKeys(controlState)
modifier := getControlKeysModifier(shift, alt, control)
if format, ok := arrowKeyMapPrefix[key]; ok {
return fmt.Sprintf(format, escapeSequence, modifier)
}
if format, ok := keyMapPrefix[key]; ok {
return fmt.Sprintf(format, modifier)
}
return ""
}
// getControlKeys extracts the shift, alt, and ctrl key states.
func getControlKeys(controlState uint32) (shift, alt, control bool) {
shift = 0 != (controlState & winterm.SHIFT_PRESSED)
alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
return shift, alt, control
}
// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
func getControlKeysModifier(shift, alt, control bool) string {
if shift && alt && control {
return ansiterm.KEY_CONTROL_PARAM_8
}
if alt && control {
return ansiterm.KEY_CONTROL_PARAM_7
}
if shift && control {
return ansiterm.KEY_CONTROL_PARAM_6
}
if control {
return ansiterm.KEY_CONTROL_PARAM_5
}
if shift && alt {
return ansiterm.KEY_CONTROL_PARAM_4
}
if alt {
return ansiterm.KEY_CONTROL_PARAM_3
}
if shift {
return ansiterm.KEY_CONTROL_PARAM_2
}
return ""
}

View File

@@ -0,0 +1,64 @@
// +build windows
package windows
import (
"io"
"os"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Azure/go-ansiterm/winterm"
)
// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
type ansiWriter struct {
file *os.File
fd uintptr
infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO
command []byte
escapeSequence []byte
inAnsiSequence bool
parser *ansiterm.AnsiParser
}
// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
// Windows console output handle.
func NewAnsiWriter(nFile int) io.Writer {
initLogger()
file, fd := winterm.GetStdFile(nFile)
info, err := winterm.GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil
}
parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
logger.Infof("newAnsiWriter: parser %p", parser)
aw := &ansiWriter{
file: file,
fd: fd,
infoReset: info,
command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
parser: parser,
}
logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
logger.Infof("newAnsiWriter: %v", aw)
return aw
}
func (aw *ansiWriter) Fd() uintptr {
return aw.fd
}
// Write writes len(p) bytes from p to the underlying data stream.
func (aw *ansiWriter) Write(p []byte) (total int, err error) {
if len(p) == 0 {
return 0, nil
}
logger.Infof("Write: % x", p)
logger.Infof("Write: %s", string(p))
return aw.parser.Parse(p)
}

View File

@@ -0,0 +1,35 @@
// +build windows
package windows
import (
"os"
"github.com/Azure/go-ansiterm/winterm"
)
// GetHandleInfo returns the file descriptor and a bool indicating whether the file is a console.
func GetHandleInfo(in interface{}) (uintptr, bool) {
switch t := in.(type) {
case *ansiReader:
return t.Fd(), true
case *ansiWriter:
return t.Fd(), true
}
var inFd uintptr
var isTerminal bool
if file, ok := in.(*os.File); ok {
inFd = file.Fd()
isTerminal = IsConsole(inFd)
}
return inFd, isTerminal
}
// IsConsole returns true if the given file descriptor is a Windows Console.
// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
func IsConsole(fd uintptr) bool {
_, e := winterm.GetConsoleMode(fd)
return e == nil
}

View File

@@ -0,0 +1,33 @@
// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
package windows
import (
"io/ioutil"
"os"
"sync"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Sirupsen/logrus"
)
var logger *logrus.Logger
var initOnce sync.Once
func initLogger() {
initOnce.Do(func() {
logFile := ioutil.Discard
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
logFile, _ = os.Create("ansiReaderWriter.log")
}
logger = &logrus.Logger{
Out: logFile,
Formatter: new(logrus.TextFormatter),
Level: logrus.DebugLevel,
}
})
}

181
vendor/golang.org/x/crypto/ed25519/ed25519.go generated vendored Normal file
View File

@@ -0,0 +1,181 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// http://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.
import (
"crypto"
cryptorand "crypto/rand"
"crypto/sha512"
"crypto/subtle"
"errors"
"io"
"strconv"
"golang.org/x/crypto/ed25519/internal/edwards25519"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
)
// PublicKey is the type of Ed25519 public keys.
type PublicKey []byte
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
type PrivateKey []byte
// Public returns the PublicKey corresponding to priv.
func (priv PrivateKey) Public() crypto.PublicKey {
publicKey := make([]byte, PublicKeySize)
copy(publicKey, priv[32:])
return PublicKey(publicKey)
}
// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
// indicate the message hasn't been hashed. This can be achieved by passing
// crypto.Hash(0) as the value for opts.
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
if opts.HashFunc() != crypto.Hash(0) {
return nil, errors.New("ed25519: cannot sign hashed message")
}
return Sign(priv, message), nil
}
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
if rand == nil {
rand = cryptorand.Reader
}
privateKey = make([]byte, PrivateKeySize)
publicKey = make([]byte, PublicKeySize)
_, err = io.ReadFull(rand, privateKey[:32])
if err != nil {
return nil, nil, err
}
digest := sha512.Sum512(privateKey[:32])
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
var A edwards25519.ExtendedGroupElement
var hBytes [32]byte
copy(hBytes[:], digest[:])
edwards25519.GeScalarMultBase(&A, &hBytes)
var publicKeyBytes [32]byte
A.ToBytes(&publicKeyBytes)
copy(privateKey[32:], publicKeyBytes[:])
copy(publicKey, publicKeyBytes[:])
return publicKey, privateKey, nil
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
if l := len(privateKey); l != PrivateKeySize {
panic("ed25519: bad private key length: " + strconv.Itoa(l))
}
h := sha512.New()
h.Write(privateKey[:32])
var digest1, messageDigest, hramDigest [64]byte
var expandedSecretKey [32]byte
h.Sum(digest1[:0])
copy(expandedSecretKey[:], digest1[:])
expandedSecretKey[0] &= 248
expandedSecretKey[31] &= 63
expandedSecretKey[31] |= 64
h.Reset()
h.Write(digest1[32:])
h.Write(message)
h.Sum(messageDigest[:0])
var messageDigestReduced [32]byte
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
var R edwards25519.ExtendedGroupElement
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
var encodedR [32]byte
R.ToBytes(&encodedR)
h.Reset()
h.Write(encodedR[:])
h.Write(privateKey[32:])
h.Write(message)
h.Sum(hramDigest[:0])
var hramDigestReduced [32]byte
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
var s [32]byte
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
signature := make([]byte, SignatureSize)
copy(signature[:], encodedR[:])
copy(signature[32:], s[:])
return signature
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
if l := len(publicKey); l != PublicKeySize {
panic("ed25519: bad public key length: " + strconv.Itoa(l))
}
if len(sig) != SignatureSize || sig[63]&224 != 0 {
return false
}
var A edwards25519.ExtendedGroupElement
var publicKeyBytes [32]byte
copy(publicKeyBytes[:], publicKey)
if !A.FromBytes(&publicKeyBytes) {
return false
}
edwards25519.FeNeg(&A.X, &A.X)
edwards25519.FeNeg(&A.T, &A.T)
h := sha512.New()
h.Write(sig[:32])
h.Write(publicKey[:])
h.Write(message)
var digest [64]byte
h.Sum(digest[:0])
var hReduced [32]byte
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
var b [32]byte
copy(b[:], sig[32:])
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
var checkR [32]byte
R.ToBytes(&checkR)
return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
}

File diff suppressed because it is too large

File diff suppressed because it is too large

98
vendor/golang.org/x/crypto/ssh/buffer.go generated vendored Normal file
View File

@@ -0,0 +1,98 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"io"
"sync"
)
// buffer provides a linked list buffer for data exchange
// between producer and consumer. Theoretically the buffer is
// of unlimited capacity as it does no allocation of its own.
type buffer struct {
// protects concurrent access to head, tail and closed
*sync.Cond
head *element // the buffer that will be read first
tail *element // the buffer that will be read last
closed bool
}
// An element represents a single link in a linked list.
type element struct {
buf []byte
next *element
}
// newBuffer returns an empty buffer that is not closed.
func newBuffer() *buffer {
e := new(element)
b := &buffer{
Cond: newCond(),
head: e,
tail: e,
}
return b
}
// write makes buf available for Read to receive.
// buf must not be modified after the call to write.
func (b *buffer) write(buf []byte) {
b.Cond.L.Lock()
e := &element{buf: buf}
b.tail.next = e
b.tail = e
b.Cond.Signal()
b.Cond.L.Unlock()
}
// eof closes the buffer. Reads from the buffer once all
// the data has been consumed will receive io.EOF.
func (b *buffer) eof() error {
b.Cond.L.Lock()
b.closed = true
b.Cond.Signal()
b.Cond.L.Unlock()
return nil
}
// Read reads data from the internal buffer into buf. Reads will block
// if no data is available, or until the buffer is closed.
func (b *buffer) Read(buf []byte) (n int, err error) {
b.Cond.L.Lock()
defer b.Cond.L.Unlock()
for len(buf) > 0 {
// if there is data in b.head, copy it
if len(b.head.buf) > 0 {
r := copy(buf, b.head.buf)
buf, b.head.buf = buf[r:], b.head.buf[r:]
n += r
continue
}
// if there is a next buffer, make it the head
if len(b.head.buf) == 0 && b.head != b.tail {
b.head = b.head.next
continue
}
// if at least one byte has been copied, return
if n > 0 {
break
}
// if nothing was read, and there is nothing outstanding
// check to see if the buffer is closed.
if b.closed {
err = io.EOF
break
}
// out of buffers, wait for producer
b.Cond.Wait()
}
return
}

503
vendor/golang.org/x/crypto/ssh/certs.go generated vendored Normal file
View File

@@ -0,0 +1,503 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"errors"
"fmt"
"io"
"net"
"sort"
"time"
)
// These constants from [PROTOCOL.certkeys] represent the algorithm names
// for certificate types supported by this package.
const (
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
)
// Certificate types distinguish between host and user
// certificates. The values can be set in the CertType field of
// Certificate.
const (
UserCert = 1
HostCert = 2
)
// Signature represents a cryptographic signature.
type Signature struct {
Format string
Blob []byte
}
// CertTimeInfinity can be used for Certificate.ValidBefore to indicate that
// a certificate does not expire.
const CertTimeInfinity = 1<<64 - 1
// A Certificate represents an OpenSSH certificate as defined in
// [PROTOCOL.certkeys]?rev=1.8.
type Certificate struct {
Nonce []byte
Key PublicKey
Serial uint64
CertType uint32
KeyId string
ValidPrincipals []string
ValidAfter uint64
ValidBefore uint64
Permissions
Reserved []byte
SignatureKey PublicKey
Signature *Signature
}
// genericCertData holds the key-independent part of the certificate data.
// Overall, certificates contain a nonce, public key fields and
// key-independent fields.
type genericCertData struct {
Serial uint64
CertType uint32
KeyId string
ValidPrincipals []byte
ValidAfter uint64
ValidBefore uint64
CriticalOptions []byte
Extensions []byte
Reserved []byte
SignatureKey []byte
Signature []byte
}
func marshalStringList(namelist []string) []byte {
var to []byte
for _, name := range namelist {
s := struct{ N string }{name}
to = append(to, Marshal(&s)...)
}
return to
}
type optionsTuple struct {
Key string
Value []byte
}
type optionsTupleValue struct {
Value string
}
// serialize a map of critical options or extensions
// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
// we need two length prefixes for a non-empty string value
func marshalTuples(tups map[string]string) []byte {
keys := make([]string, 0, len(tups))
for key := range tups {
keys = append(keys, key)
}
sort.Strings(keys)
var ret []byte
for _, key := range keys {
s := optionsTuple{Key: key}
if value := tups[key]; len(value) > 0 {
s.Value = Marshal(&optionsTupleValue{value})
}
ret = append(ret, Marshal(&s)...)
}
return ret
}
// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
// we need two length prefixes for a non-empty option value
func parseTuples(in []byte) (map[string]string, error) {
tups := map[string]string{}
var lastKey string
var haveLastKey bool
for len(in) > 0 {
var key, val, extra []byte
var ok bool
if key, in, ok = parseString(in); !ok {
return nil, errShortRead
}
keyStr := string(key)
// according to [PROTOCOL.certkeys], the names must be in
// lexical order.
if haveLastKey && keyStr <= lastKey {
return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
}
lastKey, haveLastKey = keyStr, true
// the next field is a data field, which if non-empty has a string embedded
if val, in, ok = parseString(in); !ok {
return nil, errShortRead
}
if len(val) > 0 {
val, extra, ok = parseString(val)
if !ok {
return nil, errShortRead
}
if len(extra) > 0 {
return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
}
tups[keyStr] = string(val)
} else {
tups[keyStr] = ""
}
}
return tups, nil
}
func parseCert(in []byte, privAlgo string) (*Certificate, error) {
nonce, rest, ok := parseString(in)
if !ok {
return nil, errShortRead
}
key, rest, err := parsePubKey(rest, privAlgo)
if err != nil {
return nil, err
}
var g genericCertData
if err := Unmarshal(rest, &g); err != nil {
return nil, err
}
c := &Certificate{
Nonce: nonce,
Key: key,
Serial: g.Serial,
CertType: g.CertType,
KeyId: g.KeyId,
ValidAfter: g.ValidAfter,
ValidBefore: g.ValidBefore,
}
for principals := g.ValidPrincipals; len(principals) > 0; {
principal, rest, ok := parseString(principals)
if !ok {
return nil, errShortRead
}
c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
principals = rest
}
c.CriticalOptions, err = parseTuples(g.CriticalOptions)
if err != nil {
return nil, err
}
c.Extensions, err = parseTuples(g.Extensions)
if err != nil {
return nil, err
}
c.Reserved = g.Reserved
k, err := ParsePublicKey(g.SignatureKey)
if err != nil {
return nil, err
}
c.SignatureKey = k
c.Signature, rest, ok = parseSignatureBody(g.Signature)
if !ok || len(rest) > 0 {
return nil, errors.New("ssh: signature parse error")
}
return c, nil
}
type openSSHCertSigner struct {
pub *Certificate
signer Signer
}
// NewCertSigner returns a Signer that signs with the given Certificate, whose
// private key is held by signer. It returns an error if the public key in cert
// doesn't match the key used by signer.
func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 {
return nil, errors.New("ssh: signer and cert have different public key")
}
return &openSSHCertSigner{cert, signer}, nil
}
func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
return s.signer.Sign(rand, data)
}
func (s *openSSHCertSigner) PublicKey() PublicKey {
return s.pub
}
const sourceAddressCriticalOption = "source-address"
// CertChecker does the work of verifying a certificate. Its methods
// can be plugged into ClientConfig.HostKeyCallback and
// ServerConfig.PublicKeyCallback. For the CertChecker to work,
// minimally, the IsAuthority callback should be set.
type CertChecker struct {
// SupportedCriticalOptions lists the CriticalOptions that the
// server application layer understands. These are only used
// for user certificates.
SupportedCriticalOptions []string
// IsAuthority should return true if the key is recognized as
// an authority. This allows for certificates to be signed by other
// certificates.
IsAuthority func(auth PublicKey) bool
// Clock is used for verifying time stamps. If nil, time.Now
// is used.
Clock func() time.Time
// UserKeyFallback is called when CertChecker.Authenticate encounters a
// public key that is not a certificate. It must implement validation
// of user keys or else, if nil, all such keys are rejected.
UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
// HostKeyFallback is called when CertChecker.CheckHostKey encounters a
// public key that is not a certificate. It must implement host key
// validation or else, if nil, all such keys are rejected.
HostKeyFallback HostKeyCallback
// IsRevoked is called for each certificate so that revocation checking
// can be implemented. It should return true if the given certificate
// is revoked and false otherwise. If nil, no certificates are
// considered to have been revoked.
IsRevoked func(cert *Certificate) bool
}
// CheckHostKey checks a host key certificate. This method can be
// plugged into ClientConfig.HostKeyCallback.
func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
cert, ok := key.(*Certificate)
if !ok {
if c.HostKeyFallback != nil {
return c.HostKeyFallback(addr, remote, key)
}
return errors.New("ssh: non-certificate host key")
}
if cert.CertType != HostCert {
return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
}
return c.CheckCert(addr, cert)
}
// Authenticate checks a user certificate. Authenticate can be used as
// a value for ServerConfig.PublicKeyCallback.
func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
cert, ok := pubKey.(*Certificate)
if !ok {
if c.UserKeyFallback != nil {
return c.UserKeyFallback(conn, pubKey)
}
return nil, errors.New("ssh: normal key pairs not accepted")
}
if cert.CertType != UserCert {
return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
}
if err := c.CheckCert(conn.User(), cert); err != nil {
return nil, err
}
return &cert.Permissions, nil
}
// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
// the signature of the certificate.
func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
if c.IsRevoked != nil && c.IsRevoked(cert) {
return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial)
}
for opt := range cert.CriticalOptions {
// sourceAddressCriticalOption will be enforced by
// serverAuthenticate
if opt == sourceAddressCriticalOption {
continue
}
found := false
for _, supp := range c.SupportedCriticalOptions {
if supp == opt {
found = true
break
}
}
if !found {
return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
}
}
if len(cert.ValidPrincipals) > 0 {
// By default, certs are valid for all users/hosts.
found := false
for _, p := range cert.ValidPrincipals {
if p == principal {
found = true
break
}
}
if !found {
return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
}
}
if !c.IsAuthority(cert.SignatureKey) {
return fmt.Errorf("ssh: certificate signed by unrecognized authority")
}
clock := c.Clock
if clock == nil {
clock = time.Now
}
unixNow := clock().Unix()
if after := int64(cert.ValidAfter); after < 0 || unixNow < after {
return fmt.Errorf("ssh: cert is not yet valid")
}
if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
return fmt.Errorf("ssh: cert has expired")
}
if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
return fmt.Errorf("ssh: certificate signature does not verify")
}
return nil
}
// SignCert sets c.SignatureKey to the authority's public key and stores a
// Signature, by authority, in the certificate.
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
c.Nonce = make([]byte, 32)
if _, err := io.ReadFull(rand, c.Nonce); err != nil {
return err
}
c.SignatureKey = authority.PublicKey()
sig, err := authority.Sign(rand, c.bytesForSigning())
if err != nil {
return err
}
c.Signature = sig
return nil
}
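// signUserCert is an illustrative sketch (hypothetical helper, not upstream
// code) of minting a user certificate with SignCert: userKey is the key being
// certified, authority is the CA's Signer, and the serial, key ID and validity
// window below are placeholder choices.
func signUserCert(rand io.Reader, userKey PublicKey, authority Signer, principal string) (*Certificate, error) {
	cert := &Certificate{
		Key:             userKey,
		Serial:          1,
		CertType:        UserCert,
		KeyId:           principal,
		ValidPrincipals: []string{principal},
		ValidAfter:      0,
		ValidBefore:     CertTimeInfinity, // real deployments should set an expiry
	}
	if err := cert.SignCert(rand, authority); err != nil {
		return nil, err
	}
	return cert, nil
}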
var certAlgoNames = map[string]string{
KeyAlgoRSA: CertAlgoRSAv01,
KeyAlgoDSA: CertAlgoDSAv01,
KeyAlgoECDSA256: CertAlgoECDSA256v01,
KeyAlgoECDSA384: CertAlgoECDSA384v01,
KeyAlgoECDSA521: CertAlgoECDSA521v01,
KeyAlgoED25519: CertAlgoED25519v01,
}
// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
// Panics if a non-certificate algorithm is passed.
func certToPrivAlgo(algo string) string {
for privAlgo, pubAlgo := range certAlgoNames {
if pubAlgo == algo {
return privAlgo
}
}
panic("unknown cert algorithm")
}
func (cert *Certificate) bytesForSigning() []byte {
c2 := *cert
c2.Signature = nil
out := c2.Marshal()
// Drop trailing signature length.
return out[:len(out)-4]
}
// Marshal serializes c into OpenSSH's wire format. It is part of the
// PublicKey interface.
func (c *Certificate) Marshal() []byte {
generic := genericCertData{
Serial: c.Serial,
CertType: c.CertType,
KeyId: c.KeyId,
ValidPrincipals: marshalStringList(c.ValidPrincipals),
ValidAfter: uint64(c.ValidAfter),
ValidBefore: uint64(c.ValidBefore),
CriticalOptions: marshalTuples(c.CriticalOptions),
Extensions: marshalTuples(c.Extensions),
Reserved: c.Reserved,
SignatureKey: c.SignatureKey.Marshal(),
}
if c.Signature != nil {
generic.Signature = Marshal(c.Signature)
}
genericBytes := Marshal(&generic)
keyBytes := c.Key.Marshal()
_, keyBytes, _ = parseString(keyBytes)
prefix := Marshal(&struct {
Name string
Nonce []byte
Key []byte `ssh:"rest"`
}{c.Type(), c.Nonce, keyBytes})
result := make([]byte, 0, len(prefix)+len(genericBytes))
result = append(result, prefix...)
result = append(result, genericBytes...)
return result
}
// Type returns the key name. It is part of the PublicKey interface.
func (c *Certificate) Type() string {
algo, ok := certAlgoNames[c.Key.Type()]
if !ok {
panic("unknown cert key type " + c.Key.Type())
}
return algo
}
// Verify verifies a signature against the certificate's public
// key. It is part of the PublicKey interface.
func (c *Certificate) Verify(data []byte, sig *Signature) error {
return c.Key.Verify(data, sig)
}
func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
format, in, ok := parseString(in)
if !ok {
return
}
out = &Signature{
Format: string(format),
}
if out.Blob, in, ok = parseString(in); !ok {
return
}
return out, in, ok
}
func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
sigBytes, rest, ok := parseString(in)
if !ok {
return
}
out, trailing, ok := parseSignatureBody(sigBytes)
if !ok || len(trailing) > 0 {
return nil, nil, false
}
return
}

vendor/golang.org/x/crypto/ssh/channel.go generated vendored Normal file
@@ -0,0 +1,633 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"sync"
)
const (
minPacketLength = 9
// channelMaxPacket contains the maximum number of bytes that will be
// sent in a single packet. As per RFC 4253, section 6.1, 32k is also
// the minimum.
channelMaxPacket = 1 << 15
// We follow OpenSSH here.
channelWindowSize = 64 * channelMaxPacket
)
// NewChannel represents an incoming request to open a channel. It must either be
// accepted for use by calling Accept, or rejected by calling Reject.
type NewChannel interface {
// Accept accepts the channel creation request. It returns the Channel
// and a Go channel containing SSH requests. The Go channel must be
// serviced otherwise the Channel will hang.
Accept() (Channel, <-chan *Request, error)
// Reject rejects the channel creation request. After calling
// this, no other methods on the Channel may be called.
Reject(reason RejectionReason, message string) error
// ChannelType returns the type of the channel, as supplied by the
// client.
ChannelType() string
// ExtraData returns the arbitrary payload for this channel, as supplied
// by the client. This data is specific to the channel type.
ExtraData() []byte
}
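// serviceNewChannels is an illustrative sketch (hypothetical helper, not
// upstream code) of the contract above: every NewChannel must be accepted or
// rejected, and an accepted channel's request queue must be serviced. Unknown
// types are rejected here and accepted sessions simply echo their input.
func serviceNewChannels(in <-chan NewChannel) {
	for newCh := range in {
		if newCh.ChannelType() != "session" {
			newCh.Reject(UnknownChannelType, "only session channels are supported")
			continue
		}
		ch, requests, err := newCh.Accept()
		if err != nil {
			continue
		}
		// Refuse out-of-band requests, but keep draining them so the
		// channel does not stall.
		go func(reqs <-chan *Request) {
			for req := range reqs {
				req.Reply(false, nil)
			}
		}(requests)
		go func(c Channel) {
			io.Copy(c, c) // echo the peer's data back to it
			c.Close()
		}(ch)
	}
}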
// A Channel is an ordered, reliable, flow-controlled, duplex stream
// that is multiplexed over an SSH connection.
type Channel interface {
// Read reads up to len(data) bytes from the channel.
Read(data []byte) (int, error)
// Write writes len(data) bytes to the channel.
Write(data []byte) (int, error)
// Close signals end of channel use. No data may be sent after this
// call.
Close() error
// CloseWrite signals the end of sending in-band
// data. Requests may still be sent, and the other side may
// still send data.
CloseWrite() error
// SendRequest sends a channel request. If wantReply is true,
// it will wait for a reply and return the result as a
// boolean, otherwise the return value will be false. Channel
// requests are out-of-band messages so they may be sent even
// if the data stream is closed or blocked by flow control.
// If the channel is closed before a reply is returned, io.EOF
// is returned.
SendRequest(name string, wantReply bool, payload []byte) (bool, error)
// Stderr returns an io.ReadWriter that writes to this channel
// with the extended data type set to stderr. Stderr may
// safely be read and written from a different goroutine than
// Read and Write respectively.
Stderr() io.ReadWriter
}
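// requestShell is an illustrative sketch (hypothetical helper, not upstream
// code) of the out-of-band path described above: a "shell" request is sent with
// wantReply set and the peer's boolean answer decides the outcome.
func requestShell(ch Channel) error {
	ok, err := ch.SendRequest("shell", true, nil)
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("ssh: shell request denied by peer")
	}
	return nil
}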
// Request is a request sent outside of the normal stream of
// data. Requests can either be specific to an SSH channel, or they
// can be global.
type Request struct {
Type string
WantReply bool
Payload []byte
ch *channel
mux *mux
}
// Reply sends a response to a request. It must be called for all requests
// where WantReply is true and is a no-op otherwise. The payload argument is
// ignored for replies to channel-specific requests.
func (r *Request) Reply(ok bool, payload []byte) error {
if !r.WantReply {
return nil
}
if r.ch == nil {
return r.mux.ackRequest(ok, payload)
}
return r.ch.ackRequest(ok)
}
// RejectionReason is an enumeration used when rejecting channel creation
// requests. See RFC 4254, section 5.1.
type RejectionReason uint32
const (
Prohibited RejectionReason = iota + 1
ConnectionFailed
UnknownChannelType
ResourceShortage
)
// String converts the rejection reason to human readable form.
func (r RejectionReason) String() string {
switch r {
case Prohibited:
return "administratively prohibited"
case ConnectionFailed:
return "connect failed"
case UnknownChannelType:
return "unknown channel type"
case ResourceShortage:
return "resource shortage"
}
return fmt.Sprintf("unknown reason %d", int(r))
}
func min(a uint32, b int) uint32 {
if a < uint32(b) {
return a
}
return uint32(b)
}
type channelDirection uint8
const (
channelInbound channelDirection = iota
channelOutbound
)
// channel is an implementation of the Channel interface that works
// with the mux class.
type channel struct {
// R/O after creation
chanType string
extraData []byte
localId, remoteId uint32
// maxIncomingPayload and maxRemotePayload are the maximum
// payload sizes of normal and extended data packets for
// receiving and sending, respectively. The wire packet will
// be 9 or 13 bytes larger (excluding encryption overhead).
maxIncomingPayload uint32
maxRemotePayload uint32
mux *mux
// decided is set to true if an accept or reject message has been sent
// (for outbound channels) or received (for inbound channels).
decided bool
// direction contains either channelOutbound, for channels created
// locally, or channelInbound, for channels created by the peer.
direction channelDirection
// Pending internal channel messages.
msg chan interface{}
// Since requests have no ID, there can be only one request
// with WantReply=true outstanding. This lock is held by a
// goroutine that has such an outgoing request pending.
sentRequestMu sync.Mutex
incomingRequests chan *Request
sentEOF bool
// thread-safe data
remoteWin window
pending *buffer
extPending *buffer
// windowMu protects myWindow, the flow-control window.
windowMu sync.Mutex
myWindow uint32
// writeMu serializes calls to mux.conn.writePacket() and
// protects sentClose and packetPool. This mutex must be
// different from windowMu, as writePacket can block if there
// is a key exchange pending.
writeMu sync.Mutex
sentClose bool
// packetPool has a buffer for each extended channel ID to
// save allocations during writes.
packetPool map[uint32][]byte
}
// writePacket sends a packet. If the packet is a channel close, it updates
// sentClose. This method takes the lock c.writeMu.
func (c *channel) writePacket(packet []byte) error {
c.writeMu.Lock()
if c.sentClose {
c.writeMu.Unlock()
return io.EOF
}
c.sentClose = (packet[0] == msgChannelClose)
err := c.mux.conn.writePacket(packet)
c.writeMu.Unlock()
return err
}
func (c *channel) sendMessage(msg interface{}) error {
if debugMux {
log.Printf("send(%d): %#v", c.mux.chanList.offset, msg)
}
p := Marshal(msg)
binary.BigEndian.PutUint32(p[1:], c.remoteId)
return c.writePacket(p)
}
// WriteExtended writes data to a specific extended stream. These streams are
// used, for example, for stderr.
func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
if c.sentEOF {
return 0, io.EOF
}
// 1 byte message type, 4 bytes remoteId, 4 bytes data length
opCode := byte(msgChannelData)
headerLength := uint32(9)
if extendedCode > 0 {
headerLength += 4
opCode = msgChannelExtendedData
}
c.writeMu.Lock()
packet := c.packetPool[extendedCode]
// We don't remove the buffer from packetPool, so
// WriteExtended calls from different goroutines will be
// flagged as errors by the race detector.
c.writeMu.Unlock()
for len(data) > 0 {
space := min(c.maxRemotePayload, len(data))
if space, err = c.remoteWin.reserve(space); err != nil {
return n, err
}
if want := headerLength + space; uint32(cap(packet)) < want {
packet = make([]byte, want)
} else {
packet = packet[:want]
}
todo := data[:space]
packet[0] = opCode
binary.BigEndian.PutUint32(packet[1:], c.remoteId)
if extendedCode > 0 {
binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
}
binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
copy(packet[headerLength:], todo)
if err = c.writePacket(packet); err != nil {
return n, err
}
n += len(todo)
data = data[len(todo):]
}
c.writeMu.Lock()
c.packetPool[extendedCode] = packet
c.writeMu.Unlock()
return n, err
}
func (c *channel) handleData(packet []byte) error {
headerLen := 9
isExtendedData := packet[0] == msgChannelExtendedData
if isExtendedData {
headerLen = 13
}
if len(packet) < headerLen {
// malformed data packet
return parseError(packet[0])
}
var extended uint32
if isExtendedData {
extended = binary.BigEndian.Uint32(packet[5:])
}
length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
if length == 0 {
return nil
}
if length > c.maxIncomingPayload {
// TODO(hanwen): should send Disconnect?
return errors.New("ssh: incoming packet exceeds maximum payload size")
}
data := packet[headerLen:]
if length != uint32(len(data)) {
return errors.New("ssh: wrong packet length")
}
c.windowMu.Lock()
if c.myWindow < length {
c.windowMu.Unlock()
// TODO(hanwen): should send Disconnect with reason?
return errors.New("ssh: remote side wrote too much")
}
c.myWindow -= length
c.windowMu.Unlock()
if extended == 1 {
c.extPending.write(data)
} else if extended > 0 {
// discard other extended data.
} else {
c.pending.write(data)
}
return nil
}
func (c *channel) adjustWindow(n uint32) error {
c.windowMu.Lock()
// Since myWindow is managed on our side, and can never exceed
// the initial window setting, we don't worry about overflow.
c.myWindow += uint32(n)
c.windowMu.Unlock()
return c.sendMessage(windowAdjustMsg{
AdditionalBytes: uint32(n),
})
}
func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
switch extended {
case 1:
n, err = c.extPending.Read(data)
case 0:
n, err = c.pending.Read(data)
default:
return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
}
if n > 0 {
err = c.adjustWindow(uint32(n))
// sendWindowAdjust can return io.EOF if the remote
// peer has closed the connection, however we want to
// defer forwarding io.EOF to the caller of Read until
// the buffer has been drained.
if n > 0 && err == io.EOF {
err = nil
}
}
return n, err
}
func (c *channel) close() {
c.pending.eof()
c.extPending.eof()
close(c.msg)
close(c.incomingRequests)
c.writeMu.Lock()
// This is not necessary for a normal channel teardown, but if
// there was another error, it is.
c.sentClose = true
c.writeMu.Unlock()
// Unblock writers.
c.remoteWin.close()
}
// responseMessageReceived is called when a success or failure message is
// received on a channel to check that such a message is reasonable for the
// given channel.
func (c *channel) responseMessageReceived() error {
if c.direction == channelInbound {
return errors.New("ssh: channel response message received on inbound channel")
}
if c.decided {
return errors.New("ssh: duplicate response received for channel")
}
c.decided = true
return nil
}
func (c *channel) handlePacket(packet []byte) error {
switch packet[0] {
case msgChannelData, msgChannelExtendedData:
return c.handleData(packet)
case msgChannelClose:
c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
c.mux.chanList.remove(c.localId)
c.close()
return nil
case msgChannelEOF:
// RFC 4254 is mute on how EOF affects dataExt messages but
// it is logical to signal EOF at the same time.
c.extPending.eof()
c.pending.eof()
return nil
}
decoded, err := decode(packet)
if err != nil {
return err
}
switch msg := decoded.(type) {
case *channelOpenFailureMsg:
if err := c.responseMessageReceived(); err != nil {
return err
}
c.mux.chanList.remove(msg.PeersId)
c.msg <- msg
case *channelOpenConfirmMsg:
if err := c.responseMessageReceived(); err != nil {
return err
}
if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
}
c.remoteId = msg.MyId
c.maxRemotePayload = msg.MaxPacketSize
c.remoteWin.add(msg.MyWindow)
c.msg <- msg
case *windowAdjustMsg:
if !c.remoteWin.add(msg.AdditionalBytes) {
return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
}
case *channelRequestMsg:
req := Request{
Type: msg.Request,
WantReply: msg.WantReply,
Payload: msg.RequestSpecificData,
ch: c,
}
c.incomingRequests <- &req
default:
c.msg <- msg
}
return nil
}
func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
ch := &channel{
remoteWin: window{Cond: newCond()},
myWindow: channelWindowSize,
pending: newBuffer(),
extPending: newBuffer(),
direction: direction,
incomingRequests: make(chan *Request, chanSize),
msg: make(chan interface{}, chanSize),
chanType: chanType,
extraData: extraData,
mux: m,
packetPool: make(map[uint32][]byte),
}
ch.localId = m.chanList.add(ch)
return ch
}
var errUndecided = errors.New("ssh: must Accept or Reject channel")
var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
type extChannel struct {
code uint32
ch *channel
}
func (e *extChannel) Write(data []byte) (n int, err error) {
return e.ch.WriteExtended(data, e.code)
}
func (e *extChannel) Read(data []byte) (n int, err error) {
return e.ch.ReadExtended(data, e.code)
}
func (c *channel) Accept() (Channel, <-chan *Request, error) {
if c.decided {
return nil, nil, errDecidedAlready
}
c.maxIncomingPayload = channelMaxPacket
confirm := channelOpenConfirmMsg{
PeersId: c.remoteId,
MyId: c.localId,
MyWindow: c.myWindow,
MaxPacketSize: c.maxIncomingPayload,
}
c.decided = true
if err := c.sendMessage(confirm); err != nil {
return nil, nil, err
}
return c, c.incomingRequests, nil
}
func (ch *channel) Reject(reason RejectionReason, message string) error {
if ch.decided {
return errDecidedAlready
}
reject := channelOpenFailureMsg{
PeersId: ch.remoteId,
Reason: reason,
Message: message,
Language: "en",
}
ch.decided = true
return ch.sendMessage(reject)
}
func (ch *channel) Read(data []byte) (int, error) {
if !ch.decided {
return 0, errUndecided
}
return ch.ReadExtended(data, 0)
}
func (ch *channel) Write(data []byte) (int, error) {
if !ch.decided {
return 0, errUndecided
}
return ch.WriteExtended(data, 0)
}
func (ch *channel) CloseWrite() error {
if !ch.decided {
return errUndecided
}
ch.sentEOF = true
return ch.sendMessage(channelEOFMsg{
PeersId: ch.remoteId})
}
func (ch *channel) Close() error {
if !ch.decided {
return errUndecided
}
return ch.sendMessage(channelCloseMsg{
PeersId: ch.remoteId})
}
// Extended returns an io.ReadWriter that sends and receives data on the given
// SSH extended stream. Such streams are used, for example, for stderr.
func (ch *channel) Extended(code uint32) io.ReadWriter {
if !ch.decided {
return nil
}
return &extChannel{code, ch}
}
func (ch *channel) Stderr() io.ReadWriter {
return ch.Extended(1)
}
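// writeStderrNotice is an illustrative sketch (hypothetical helper, not
// upstream code): Stderr wraps the extended stream with code 1, so anything
// written to it arrives on the peer's standard error.
func writeStderrNotice(ch Channel, notice string) error {
	_, err := ch.Stderr().Write([]byte(notice))
	return err
}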
func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
if !ch.decided {
return false, errUndecided
}
if wantReply {
ch.sentRequestMu.Lock()
defer ch.sentRequestMu.Unlock()
}
msg := channelRequestMsg{
PeersId: ch.remoteId,
Request: name,
WantReply: wantReply,
RequestSpecificData: payload,
}
if err := ch.sendMessage(msg); err != nil {
return false, err
}
if wantReply {
m, ok := (<-ch.msg)
if !ok {
return false, io.EOF
}
switch m.(type) {
case *channelRequestFailureMsg:
return false, nil
case *channelRequestSuccessMsg:
return true, nil
default:
return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
}
}
return false, nil
}
// ackRequest either sends an ack or nack to the channel request.
func (ch *channel) ackRequest(ok bool) error {
if !ch.decided {
return errUndecided
}
var msg interface{}
if !ok {
msg = channelRequestFailureMsg{
PeersId: ch.remoteId,
}
} else {
msg = channelRequestSuccessMsg{
PeersId: ch.remoteId,
}
}
return ch.sendMessage(msg)
}
func (ch *channel) ChannelType() string {
return ch.chanType
}
func (ch *channel) ExtraData() []byte {
return ch.extraData
}

vendor/golang.org/x/crypto/ssh/cipher.go generated vendored Normal file
@@ -0,0 +1,627 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/rc4"
"crypto/subtle"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
)
const (
packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
// RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations
// MUST be able to process (plus a few more kilobytes for padding and mac). The RFC
// indicates implementations SHOULD be able to handle larger packet sizes, but then
// waffles on about reasonable limits.
//
// OpenSSH caps their maxPacket at 256kB so we choose to do
// the same. maxPacket is also used to ensure that uint32
// length fields do not overflow, so it should remain well
// below 4G.
maxPacket = 256 * 1024
)
// noneCipher implements cipher.Stream and provides no encryption. It is used
// by the transport before the first key-exchange.
type noneCipher struct{}
func (c noneCipher) XORKeyStream(dst, src []byte) {
copy(dst, src)
}
func newAESCTR(key, iv []byte) (cipher.Stream, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
return cipher.NewCTR(c, iv), nil
}
func newRC4(key, iv []byte) (cipher.Stream, error) {
return rc4.NewCipher(key)
}
type streamCipherMode struct {
keySize int
ivSize int
skip int
createFunc func(key, iv []byte) (cipher.Stream, error)
}
func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
if len(key) < c.keySize {
panic("ssh: key length too small for cipher")
}
if len(iv) < c.ivSize {
panic("ssh: iv too small for cipher")
}
stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
if err != nil {
return nil, err
}
var streamDump []byte
if c.skip > 0 {
streamDump = make([]byte, 512)
}
for remainingToDump := c.skip; remainingToDump > 0; {
dumpThisTime := remainingToDump
if dumpThisTime > len(streamDump) {
dumpThisTime = len(streamDump)
}
stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
remainingToDump -= dumpThisTime
}
return stream, nil
}
// cipherModes documents properties of supported ciphers. Ciphers not included
// are not supported and will not be negotiated, even if explicitly requested in
// ClientConfig.Ciphers (a field of the embedded Config).
var cipherModes = map[string]*streamCipherMode{
// Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
// are defined in the order specified in the RFC.
"aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
"aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
"aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
// Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
// They are defined in the order specified in the RFC.
"arcfour128": {16, 0, 1536, newRC4},
"arcfour256": {32, 0, 1536, newRC4},
// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
// RC4) has problems with weak keys, and should be used with caution."
// RFC4345 introduces improved versions of Arcfour.
"arcfour": {16, 0, 0, newRC4},
// AES-GCM is not a stream cipher, so it is constructed with a
// special case. If we add any more non-stream ciphers, we
// should invest in a cleaner way to do this.
gcmCipherID: {16, 12, 0, nil},
// CBC mode is insecure and so is not included in the default config.
// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
aes128cbcID: {16, aes.BlockSize, 0, nil},
// 3des-cbc is insecure and is disabled by default.
tripledescbcID: {24, des.BlockSize, 0, nil},
}
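// ctrOnlyClientConfig is an illustrative sketch (hypothetical helper, not
// upstream code) of how the table above interacts with configuration: listing
// only the AES-CTR entries in the Ciphers field of the embedded Config keeps
// the arcfour and CBC modes from ever being offered. User, auth methods and
// the host key callback are supplied by the caller.
func ctrOnlyClientConfig(user string, auth []AuthMethod, cb HostKeyCallback) *ClientConfig {
	cfg := &ClientConfig{
		User:            user,
		Auth:            auth,
		HostKeyCallback: cb,
	}
	cfg.Ciphers = []string{"aes128-ctr", "aes192-ctr", "aes256-ctr"}
	return cfg
}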
// prefixLen is the length of the packet prefix that contains the packet length
// and number of padding bytes.
const prefixLen = 5
// streamPacketCipher is a packetCipher using a stream cipher.
type streamPacketCipher struct {
mac hash.Hash
cipher cipher.Stream
etm bool
// The following members are to avoid per-packet allocations.
prefix [prefixLen]byte
seqNumBytes [4]byte
padding [2 * packetSizeMultiple]byte
packetData []byte
macResult []byte
}
// readPacket reads and decrypts a single packet from the reader argument.
func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
return nil, err
}
var encryptedPaddingLength [1]byte
if s.mac != nil && s.etm {
copy(encryptedPaddingLength[:], s.prefix[4:5])
s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
} else {
s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
}
length := binary.BigEndian.Uint32(s.prefix[0:4])
paddingLength := uint32(s.prefix[4])
var macSize uint32
if s.mac != nil {
s.mac.Reset()
binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
s.mac.Write(s.seqNumBytes[:])
if s.etm {
s.mac.Write(s.prefix[:4])
s.mac.Write(encryptedPaddingLength[:])
} else {
s.mac.Write(s.prefix[:])
}
macSize = uint32(s.mac.Size())
}
if length <= paddingLength+1 {
return nil, errors.New("ssh: invalid packet length, packet too small")
}
if length > maxPacket {
return nil, errors.New("ssh: invalid packet length, packet too large")
}
// the maxPacket check above ensures that length-1+macSize
// does not overflow.
if uint32(cap(s.packetData)) < length-1+macSize {
s.packetData = make([]byte, length-1+macSize)
} else {
s.packetData = s.packetData[:length-1+macSize]
}
if _, err := io.ReadFull(r, s.packetData); err != nil {
return nil, err
}
mac := s.packetData[length-1:]
data := s.packetData[:length-1]
if s.mac != nil && s.etm {
s.mac.Write(data)
}
s.cipher.XORKeyStream(data, data)
if s.mac != nil {
if !s.etm {
s.mac.Write(data)
}
s.macResult = s.mac.Sum(s.macResult[:0])
if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
return nil, errors.New("ssh: MAC failure")
}
}
return s.packetData[:length-paddingLength-1], nil
}
// writePacket encrypts and sends a packet of data to the writer argument
func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
if len(packet) > maxPacket {
return errors.New("ssh: packet too large")
}
aadlen := 0
if s.mac != nil && s.etm {
// packet length is not encrypted for EtM modes
aadlen = 4
}
paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple
if paddingLength < 4 {
paddingLength += packetSizeMultiple
}
length := len(packet) + 1 + paddingLength
binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
s.prefix[4] = byte(paddingLength)
padding := s.padding[:paddingLength]
if _, err := io.ReadFull(rand, padding); err != nil {
return err
}
if s.mac != nil {
s.mac.Reset()
binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
s.mac.Write(s.seqNumBytes[:])
if s.etm {
// For EtM algorithms, the packet length must stay unencrypted,
// but the following data (padding length) must be encrypted
s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
}
s.mac.Write(s.prefix[:])
if !s.etm {
// For non-EtM algorithms, the MAC is computed over the unencrypted data
s.mac.Write(packet)
s.mac.Write(padding)
}
}
if !(s.mac != nil && s.etm) {
// For EtM algorithms, the padding length has already been encrypted
// and the packet length must remain unencrypted
s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
}
s.cipher.XORKeyStream(packet, packet)
s.cipher.XORKeyStream(padding, padding)
if s.mac != nil && s.etm {
// For EtM algorithms, packet and padding must be encrypted
s.mac.Write(packet)
s.mac.Write(padding)
}
if _, err := w.Write(s.prefix[:]); err != nil {
return err
}
if _, err := w.Write(packet); err != nil {
return err
}
if _, err := w.Write(padding); err != nil {
return err
}
if s.mac != nil {
s.macResult = s.mac.Sum(s.macResult[:0])
if _, err := w.Write(s.macResult); err != nil {
return err
}
}
return nil
}
type gcmCipher struct {
aead cipher.AEAD
prefix [4]byte
iv []byte
buf []byte
}
func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
aead, err := cipher.NewGCM(c)
if err != nil {
return nil, err
}
return &gcmCipher{
aead: aead,
iv: iv,
}, nil
}
const gcmTagSize = 16
func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
// Pad out to multiple of 16 bytes. This is different from the
// stream cipher because that encrypts the length too.
padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
if padding < 4 {
padding += packetSizeMultiple
}
length := uint32(len(packet) + int(padding) + 1)
binary.BigEndian.PutUint32(c.prefix[:], length)
if _, err := w.Write(c.prefix[:]); err != nil {
return err
}
if cap(c.buf) < int(length) {
c.buf = make([]byte, length)
} else {
c.buf = c.buf[:length]
}
c.buf[0] = padding
copy(c.buf[1:], packet)
if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
return err
}
c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
if _, err := w.Write(c.buf); err != nil {
return err
}
c.incIV()
return nil
}
func (c *gcmCipher) incIV() {
for i := 4 + 7; i >= 4; i-- {
c.iv[i]++
if c.iv[i] != 0 {
break
}
}
}
func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
return nil, err
}
length := binary.BigEndian.Uint32(c.prefix[:])
if length > maxPacket {
return nil, errors.New("ssh: max packet length exceeded.")
}
if cap(c.buf) < int(length+gcmTagSize) {
c.buf = make([]byte, length+gcmTagSize)
} else {
c.buf = c.buf[:length+gcmTagSize]
}
if _, err := io.ReadFull(r, c.buf); err != nil {
return nil, err
}
plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
if err != nil {
return nil, err
}
c.incIV()
padding := plain[0]
if padding < 4 || padding >= 20 {
return nil, fmt.Errorf("ssh: illegal padding %d", padding)
}
if int(padding+1) >= len(plain) {
return nil, fmt.Errorf("ssh: padding %d too large", padding)
}
plain = plain[1 : length-uint32(padding)]
return plain, nil
}
// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1
type cbcCipher struct {
mac hash.Hash
macSize uint32
decrypter cipher.BlockMode
encrypter cipher.BlockMode
// The following members are to avoid per-packet allocations.
seqNumBytes [4]byte
packetData []byte
macResult []byte
// Amount of data we should still read to hide which
// verification error triggered.
oracleCamouflage uint32
}
func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
cbc := &cbcCipher{
mac: macModes[algs.MAC].new(macKey),
decrypter: cipher.NewCBCDecrypter(c, iv),
encrypter: cipher.NewCBCEncrypter(c, iv),
packetData: make([]byte, 1024),
}
if cbc.mac != nil {
cbc.macSize = uint32(cbc.mac.Size())
}
return cbc, nil
}
func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
cbc, err := newCBCCipher(c, iv, key, macKey, algs)
if err != nil {
return nil, err
}
return cbc, nil
}
func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
c, err := des.NewTripleDESCipher(key)
if err != nil {
return nil, err
}
cbc, err := newCBCCipher(c, iv, key, macKey, algs)
if err != nil {
return nil, err
}
return cbc, nil
}
func maxUInt32(a, b int) uint32 {
if a > b {
return uint32(a)
}
return uint32(b)
}
const (
cbcMinPacketSizeMultiple = 8
cbcMinPacketSize = 16
cbcMinPaddingSize = 4
)
// cbcError represents a verification error that may leak information.
type cbcError string
func (e cbcError) Error() string { return string(e) }
func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
p, err := c.readPacketLeaky(seqNum, r)
if err != nil {
if _, ok := err.(cbcError); ok {
// Verification error: read a fixed amount of
// data, to make distinguishing between
// failing MAC and failing length check more
// difficult.
io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
}
}
return p, err
}
func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) {
blockSize := c.decrypter.BlockSize()
// Read the header, which will include some of the subsequent data in the
// case of block ciphers - this is copied back to the payload later.
// How many bytes of payload/padding will be read with this first read.
firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize)
firstBlock := c.packetData[:firstBlockLength]
if _, err := io.ReadFull(r, firstBlock); err != nil {
return nil, err
}
c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength
c.decrypter.CryptBlocks(firstBlock, firstBlock)
length := binary.BigEndian.Uint32(firstBlock[:4])
if length > maxPacket {
return nil, cbcError("ssh: packet too large")
}
if length+4 < maxUInt32(cbcMinPacketSize, blockSize) {
// The minimum size of a packet is 16 (or the cipher block size, whichever
// is larger) bytes.
return nil, cbcError("ssh: packet too small")
}
// The length of the packet (including the length field but not the MAC) must
// be a multiple of the block size or 8, whichever is larger.
if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 {
return nil, cbcError("ssh: invalid packet length multiple")
}
paddingLength := uint32(firstBlock[4])
if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
return nil, cbcError("ssh: invalid packet length")
}
// Positions within the c.packetData buffer:
macStart := 4 + length
paddingStart := macStart - paddingLength
// Entire packet size, starting before length, ending at end of mac.
entirePacketSize := macStart + c.macSize
// Ensure c.packetData is large enough for the entire packet data.
if uint32(cap(c.packetData)) < entirePacketSize {
// Still need to upsize and copy, but this should be rare at runtime, only
// on upsizing the packetData buffer.
c.packetData = make([]byte, entirePacketSize)
copy(c.packetData, firstBlock)
} else {
c.packetData = c.packetData[:entirePacketSize]
}
if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
return nil, err
} else {
c.oracleCamouflage -= uint32(n)
}
remainingCrypted := c.packetData[firstBlockLength:macStart]
c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
mac := c.packetData[macStart:]
if c.mac != nil {
c.mac.Reset()
binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
c.mac.Write(c.seqNumBytes[:])
c.mac.Write(c.packetData[:macStart])
c.macResult = c.mac.Sum(c.macResult[:0])
if subtle.ConstantTimeCompare(c.macResult, mac) != 1 {
return nil, cbcError("ssh: MAC failure")
}
}
return c.packetData[prefixLen:paddingStart], nil
}
func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize())
// Length of encrypted portion of the packet (header, payload, padding).
// Enforce minimum padding and packet size.
encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize)
// Enforce block size.
encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize
length := encLength - 4
paddingLength := int(length) - (1 + len(packet))
// Overall buffer contains: header, payload, padding, mac.
// Space for the MAC is reserved in the capacity but not the slice length.
bufferSize := encLength + c.macSize
if uint32(cap(c.packetData)) < bufferSize {
c.packetData = make([]byte, encLength, bufferSize)
} else {
c.packetData = c.packetData[:encLength]
}
p := c.packetData
// Packet header.
binary.BigEndian.PutUint32(p, length)
p = p[4:]
p[0] = byte(paddingLength)
// Payload.
p = p[1:]
copy(p, packet)
// Padding.
p = p[len(packet):]
if _, err := io.ReadFull(rand, p); err != nil {
return err
}
if c.mac != nil {
c.mac.Reset()
binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
c.mac.Write(c.seqNumBytes[:])
c.mac.Write(c.packetData)
// The MAC is now appended into the capacity reserved for it earlier.
c.packetData = c.mac.Sum(c.packetData)
}
c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
if _, err := w.Write(c.packetData); err != nil {
return err
}
return nil
}

vendor/golang.org/x/crypto/ssh/client.go generated vendored Normal file
@@ -0,0 +1,256 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"errors"
"fmt"
"net"
"sync"
"time"
)
// Client implements a traditional SSH client that supports shells,
// subprocesses, port forwarding and tunneled dialing.
type Client struct {
Conn
forwards forwardList // forwarded tcpip connections from the remote side
mu sync.Mutex
channelHandlers map[string]chan NewChannel
}
// HandleChannelOpen returns a channel on which NewChannel requests
// for the given type are sent. If the type already is being handled,
// nil is returned. The channel is closed when the connection is closed.
func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
c.mu.Lock()
defer c.mu.Unlock()
if c.channelHandlers == nil {
// The SSH channel has been closed.
c := make(chan NewChannel)
close(c)
return c
}
ch := c.channelHandlers[channelType]
if ch != nil {
return nil
}
ch = make(chan NewChannel, chanSize)
c.channelHandlers[channelType] = ch
return ch
}
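// rejectAgentForwarding is an illustrative sketch (hypothetical helper, not
// upstream code) of HandleChannelOpen: the client registers interest in one
// channel type and services the resulting NewChannel values; here it simply
// turns them all down.
func rejectAgentForwarding(c *Client) {
	incoming := c.HandleChannelOpen("auth-agent@openssh.com")
	if incoming == nil {
		return // another handler already owns this channel type
	}
	go func() {
		for newCh := range incoming {
			newCh.Reject(Prohibited, "agent forwarding is disabled")
		}
	}()
}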
// NewClient creates a Client on top of the given connection.
func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
conn := &Client{
Conn: c,
channelHandlers: make(map[string]chan NewChannel, 1),
}
go conn.handleGlobalRequests(reqs)
go conn.handleChannelOpens(chans)
go func() {
conn.Wait()
conn.forwards.closeAll()
}()
go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
return conn
}
// NewClientConn establishes an authenticated SSH connection using c
// as the underlying transport. The Request and NewChannel channels
// must be serviced or the connection will hang.
func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
fullConf := *config
fullConf.SetDefaults()
if fullConf.HostKeyCallback == nil {
c.Close()
return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback")
}
conn := &connection{
sshConn: sshConn{conn: c},
}
if err := conn.clientHandshake(addr, &fullConf); err != nil {
c.Close()
return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
}
conn.mux = newMux(conn.transport)
return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
}
// clientHandshake performs the client side key exchange. See RFC 4253 Section
// 7.
func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
if config.ClientVersion != "" {
c.clientVersion = []byte(config.ClientVersion)
} else {
c.clientVersion = []byte(packageVersion)
}
var err error
c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
if err != nil {
return err
}
c.transport = newClientTransport(
newTransport(c.sshConn.conn, config.Rand, true /* is client */),
c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
if err := c.transport.waitSession(); err != nil {
return err
}
c.sessionID = c.transport.getSessionID()
return c.clientAuthenticate(config)
}
// verifyHostKeySignature verifies the host key obtained in the key
// exchange.
func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
sig, rest, ok := parseSignatureBody(result.Signature)
if len(rest) > 0 || !ok {
return errors.New("ssh: signature parse error")
}
return hostKey.Verify(result.H, sig)
}
// NewSession opens a new Session for this client. (A session is a remote
// execution of a program.)
func (c *Client) NewSession() (*Session, error) {
ch, in, err := c.OpenChannel("session", nil)
if err != nil {
return nil, err
}
return newSession(ch, in)
}
func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
for r := range incoming {
// This handles keepalive messages and matches
// the behaviour of OpenSSH.
r.Reply(false, nil)
}
}
// handleChannelOpens handles channel open messages from the remote side.
func (c *Client) handleChannelOpens(in <-chan NewChannel) {
for ch := range in {
c.mu.Lock()
handler := c.channelHandlers[ch.ChannelType()]
c.mu.Unlock()
if handler != nil {
handler <- ch
} else {
ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
}
}
c.mu.Lock()
for _, ch := range c.channelHandlers {
close(ch)
}
c.channelHandlers = nil
c.mu.Unlock()
}
// Dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client. For access
// to incoming channels and requests, use net.Dial with NewClientConn
// instead.
func Dial(network, addr string, config *ClientConfig) (*Client, error) {
conn, err := net.DialTimeout(network, addr, config.Timeout)
if err != nil {
return nil, err
}
c, chans, reqs, err := NewClientConn(conn, addr, config)
if err != nil {
return nil, err
}
return NewClient(c, chans, reqs), nil
}
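// dialAndRun is an illustrative sketch (hypothetical helper, not upstream code)
// tying Dial to the session API defined in session.go: connect, open a session,
// run one command, and close. Address, user, password and the command are
// placeholders; the host key callback is supplied by the caller.
func dialAndRun(addr, user, password string, cb HostKeyCallback) error {
	config := &ClientConfig{
		User:            user,
		Auth:            []AuthMethod{Password(password)},
		HostKeyCallback: cb,
		Timeout:         30 * time.Second,
	}
	client, err := Dial("tcp", addr, config)
	if err != nil {
		return err
	}
	defer client.Close()
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()
	return session.Run("uptime")
}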
// HostKeyCallback is the function type used for verifying server
// keys. A HostKeyCallback must return nil if the host key is OK, or
// an error to reject it. It receives the hostname as passed to Dial
// or NewClientConn. The remote address is the RemoteAddr of the
// net.Conn underlying the SSH connection.
type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
// A ClientConfig structure is used to configure a Client. It must not be
// modified after having been passed to an SSH function.
type ClientConfig struct {
// Config contains configuration that is shared between clients and
// servers.
Config
// User contains the username to authenticate as.
User string
// Auth contains possible authentication methods to use with the
// server. Only the first instance of a particular RFC 4252 method will
// be used during authentication.
Auth []AuthMethod
// HostKeyCallback is called during the cryptographic
// handshake to validate the server's host key. The client
// configuration must supply this callback for the connection
// to succeed. The functions InsecureIgnoreHostKey or
// FixedHostKey can be used for simplistic host key checks.
HostKeyCallback HostKeyCallback
// ClientVersion contains the version identification string that will
// be used for the connection. If empty, a reasonable default is used.
ClientVersion string
// HostKeyAlgorithms lists the key types that the client will
// accept from the server as host key, in order of
// preference. If empty, a reasonable default is used. Any
// string returned from PublicKey.Type method may be used, or
// any of the CertAlgoXxxx and KeyAlgoXxxx constants.
HostKeyAlgorithms []string
// Timeout is the maximum amount of time for the TCP connection to establish.
//
// A Timeout of zero means no timeout.
Timeout time.Duration
}
// InsecureIgnoreHostKey returns a function that can be used for
// ClientConfig.HostKeyCallback to accept any host key. It should
// not be used for production code.
func InsecureIgnoreHostKey() HostKeyCallback {
return func(hostname string, remote net.Addr, key PublicKey) error {
return nil
}
}
type fixedHostKey struct {
key PublicKey
}
func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error {
if f.key == nil {
return fmt.Errorf("ssh: required host key was nil")
}
if !bytes.Equal(key.Marshal(), f.key.Marshal()) {
return fmt.Errorf("ssh: host key mismatch")
}
return nil
}
// FixedHostKey returns a function for use in
// ClientConfig.HostKeyCallback to accept only a specific host key.
func FixedHostKey(key PublicKey) HostKeyCallback {
hk := &fixedHostKey{key}
return hk.check
}
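// fixedHostKeyFromAuthorizedKey is an illustrative sketch (hypothetical helper,
// not upstream code) pairing FixedHostKey with the authorized_keys parser in
// keys.go: the pinned host key is read from a line such as ssh-keyscan output.
func fixedHostKeyFromAuthorizedKey(line []byte) (HostKeyCallback, error) {
	key, _, _, _, err := ParseAuthorizedKey(line)
	if err != nil {
		return nil, err
	}
	return FixedHostKey(key), nil
}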

vendor/golang.org/x/crypto/ssh/client_auth.go generated vendored Normal file
@@ -0,0 +1,486 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"errors"
"fmt"
"io"
)
// clientAuthenticate authenticates with the remote server. See RFC 4252.
func (c *connection) clientAuthenticate(config *ClientConfig) error {
// initiate user auth session
if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil {
return err
}
packet, err := c.transport.readPacket()
if err != nil {
return err
}
var serviceAccept serviceAcceptMsg
if err := Unmarshal(packet, &serviceAccept); err != nil {
return err
}
// during the authentication phase the client first attempts the "none" method
// then any untried methods suggested by the server.
tried := make(map[string]bool)
var lastMethods []string
sessionID := c.transport.getSessionID()
for auth := AuthMethod(new(noneAuth)); auth != nil; {
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand)
if err != nil {
return err
}
if ok {
// success
return nil
}
tried[auth.method()] = true
if methods == nil {
methods = lastMethods
}
lastMethods = methods
auth = nil
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
if tried[candidateMethod] {
continue
}
for _, meth := range methods {
if meth == candidateMethod {
auth = a
break findNext
}
}
}
}
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
}
func keys(m map[string]bool) []string {
s := make([]string, 0, len(m))
for key := range m {
s = append(s, key)
}
return s
}
// An AuthMethod represents an instance of an RFC 4252 authentication method.
type AuthMethod interface {
// auth authenticates user over transport t.
// Returns true if authentication is successful.
// If authentication is not successful, a []string of alternative
// method names is returned. If the slice is nil, it will be ignored
// and the previous set of possible methods will be reused.
auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
// method returns the RFC 4252 method name.
method() string
}
// "none" authentication, RFC 4252 section 5.2.
type noneAuth int
func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
if err := c.writePacket(Marshal(&userAuthRequestMsg{
User: user,
Service: serviceSSH,
Method: "none",
})); err != nil {
return false, nil, err
}
return handleAuthResponse(c)
}
func (n *noneAuth) method() string {
return "none"
}
// passwordCallback is an AuthMethod that fetches the password through
// a function call, e.g. by prompting the user.
type passwordCallback func() (password string, err error)
func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
type passwordAuthMsg struct {
User string `sshtype:"50"`
Service string
Method string
Reply bool
Password string
}
pw, err := cb()
// REVIEW NOTE: is there a need to support skipping a password attempt?
// The program may only find out that the user doesn't have a password
// when prompting.
if err != nil {
return false, nil, err
}
if err := c.writePacket(Marshal(&passwordAuthMsg{
User: user,
Service: serviceSSH,
Method: cb.method(),
Reply: false,
Password: pw,
})); err != nil {
return false, nil, err
}
return handleAuthResponse(c)
}
func (cb passwordCallback) method() string {
return "password"
}
// Password returns an AuthMethod using the given password.
func Password(secret string) AuthMethod {
return passwordCallback(func() (string, error) { return secret, nil })
}
// PasswordCallback returns an AuthMethod that uses a callback for
// fetching a password.
func PasswordCallback(prompt func() (secret string, err error)) AuthMethod {
return passwordCallback(prompt)
}
type publickeyAuthMsg struct {
User string `sshtype:"50"`
Service string
Method string
// HasSig indicates to the receiver that the auth request is signed and
// should be used for authentication of the request.
HasSig bool
Algoname string
PubKey []byte
// Sig is tagged with "rest" so Marshal will exclude it during
// validateKey
Sig []byte `ssh:"rest"`
}
// publicKeyCallback is an AuthMethod that uses a set of key
// pairs for authentication.
type publicKeyCallback func() ([]Signer, error)
func (cb publicKeyCallback) method() string {
return "publickey"
}
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
// Authentication is performed by sending an enquiry to test if a key is
// acceptable to the remote. If the key is acceptable, the client will
// attempt to authenticate with the valid key. If not the client will repeat
// the process with the remaining keys.
signers, err := cb()
if err != nil {
return false, nil, err
}
var methods []string
for _, signer := range signers {
ok, err := validateKey(signer.PublicKey(), user, c)
if err != nil {
return false, nil, err
}
if !ok {
continue
}
pub := signer.PublicKey()
pubKey := pub.Marshal()
sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
User: user,
Service: serviceSSH,
Method: cb.method(),
}, []byte(pub.Type()), pubKey))
if err != nil {
return false, nil, err
}
// manually wrap the serialized signature in a string
s := Marshal(sign)
sig := make([]byte, stringLength(len(s)))
marshalString(sig, s)
msg := publickeyAuthMsg{
User: user,
Service: serviceSSH,
Method: cb.method(),
HasSig: true,
Algoname: pub.Type(),
PubKey: pubKey,
Sig: sig,
}
p := Marshal(&msg)
if err := c.writePacket(p); err != nil {
return false, nil, err
}
var success bool
success, methods, err = handleAuthResponse(c)
if err != nil {
return false, nil, err
}
// If authentication succeeds or the list of available methods does not
// contain the "publickey" method, do not attempt to authenticate with any
// other keys. According to RFC 4252 Section 7, the latter can occur when
// additional authentication methods are required.
if success || !containsMethod(methods, cb.method()) {
return success, methods, err
}
}
return false, methods, nil
}
func containsMethod(methods []string, method string) bool {
for _, m := range methods {
if m == method {
return true
}
}
return false
}
// validateKey validates that the provided key is acceptable to the server.
func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
pubKey := key.Marshal()
msg := publickeyAuthMsg{
User: user,
Service: serviceSSH,
Method: "publickey",
HasSig: false,
Algoname: key.Type(),
PubKey: pubKey,
}
if err := c.writePacket(Marshal(&msg)); err != nil {
return false, err
}
return confirmKeyAck(key, c)
}
func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
pubKey := key.Marshal()
algoname := key.Type()
for {
packet, err := c.readPacket()
if err != nil {
return false, err
}
switch packet[0] {
case msgUserAuthBanner:
// TODO(gpaul): add callback to present the banner to the user
case msgUserAuthPubKeyOk:
var msg userAuthPubKeyOkMsg
if err := Unmarshal(packet, &msg); err != nil {
return false, err
}
if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
return false, nil
}
return true, nil
case msgUserAuthFailure:
return false, nil
default:
return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
}
}
}
// PublicKeys returns an AuthMethod that uses the given key
// pairs.
func PublicKeys(signers ...Signer) AuthMethod {
return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
}
// PublicKeysCallback returns an AuthMethod that runs the given
// function to obtain a list of key pairs.
func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
return publicKeyCallback(getSigners)
}
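// publicKeyAuthFromPEM is an illustrative sketch (hypothetical helper, not
// upstream code) of the usual pairing of PublicKeys with the parser in keys.go:
// a PEM-encoded private key becomes a Signer, which becomes an AuthMethod for
// ClientConfig.Auth.
func publicKeyAuthFromPEM(pemBytes []byte) (AuthMethod, error) {
	signer, err := ParsePrivateKey(pemBytes)
	if err != nil {
		return nil, err
	}
	return PublicKeys(signer), nil
}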
// handleAuthResponse returns whether the preceding authentication request succeeded
// along with a list of remaining authentication methods to try next and
// an error if an unexpected response was received.
func handleAuthResponse(c packetConn) (bool, []string, error) {
for {
packet, err := c.readPacket()
if err != nil {
return false, nil, err
}
switch packet[0] {
case msgUserAuthBanner:
// TODO: add callback to present the banner to the user
case msgUserAuthFailure:
var msg userAuthFailureMsg
if err := Unmarshal(packet, &msg); err != nil {
return false, nil, err
}
return false, msg.Methods, nil
case msgUserAuthSuccess:
return true, nil, nil
default:
return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
}
}
}
// KeyboardInteractiveChallenge should print questions, optionally
// disabling echoing (e.g. for passwords), and return all the answers.
// Challenge may be called multiple times in a single session. After
// successful authentication, the server may send a challenge with no
// questions, for which the user and instruction messages should be
// printed. RFC 4256 section 3.3 details how the UI should behave for
// both CLI and GUI environments.
type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
// KeyboardInteractive returns an AuthMethod using a prompt/response
// sequence controlled by the server.
func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
return challenge
}
func (cb KeyboardInteractiveChallenge) method() string {
return "keyboard-interactive"
}
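// fixedAnswerChallenge is an illustrative sketch (hypothetical helper, not
// upstream code) of a KeyboardInteractiveChallenge: it answers every prompt
// with the same pre-arranged secret and returns no answers for banner-only
// calls. A real client would prompt the user and honour the echo flags. It
// would typically be wired up as KeyboardInteractive(fixedAnswerChallenge(secret)).
func fixedAnswerChallenge(secret string) KeyboardInteractiveChallenge {
	return func(user, instruction string, questions []string, echos []bool) ([]string, error) {
		answers := make([]string, len(questions))
		for i := range questions {
			answers[i] = secret
		}
		return answers, nil
	}
}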
func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
type initiateMsg struct {
User string `sshtype:"50"`
Service string
Method string
Language string
Submethods string
}
if err := c.writePacket(Marshal(&initiateMsg{
User: user,
Service: serviceSSH,
Method: "keyboard-interactive",
})); err != nil {
return false, nil, err
}
for {
packet, err := c.readPacket()
if err != nil {
return false, nil, err
}
// like handleAuthResponse, but with fewer options.
switch packet[0] {
case msgUserAuthBanner:
// TODO: Print banners during userauth.
continue
case msgUserAuthInfoRequest:
// OK
case msgUserAuthFailure:
var msg userAuthFailureMsg
if err := Unmarshal(packet, &msg); err != nil {
return false, nil, err
}
return false, msg.Methods, nil
case msgUserAuthSuccess:
return true, nil, nil
default:
return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
}
var msg userAuthInfoRequestMsg
if err := Unmarshal(packet, &msg); err != nil {
return false, nil, err
}
// Manually unpack the prompt/echo pairs.
rest := msg.Prompts
var prompts []string
var echos []bool
for i := 0; i < int(msg.NumPrompts); i++ {
prompt, r, ok := parseString(rest)
if !ok || len(r) == 0 {
return false, nil, errors.New("ssh: prompt format error")
}
prompts = append(prompts, string(prompt))
echos = append(echos, r[0] != 0)
rest = r[1:]
}
if len(rest) != 0 {
return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
}
answers, err := cb(msg.User, msg.Instruction, prompts, echos)
if err != nil {
return false, nil, err
}
if len(answers) != len(prompts) {
return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
}
responseLength := 1 + 4
for _, a := range answers {
responseLength += stringLength(len(a))
}
serialized := make([]byte, responseLength)
p := serialized
p[0] = msgUserAuthInfoResponse
p = p[1:]
p = marshalUint32(p, uint32(len(answers)))
for _, a := range answers {
p = marshalString(p, []byte(a))
}
if err := c.writePacket(serialized); err != nil {
return false, nil, err
}
}
}
type retryableAuthMethod struct {
authMethod AuthMethod
maxTries int
}
func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) {
for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
ok, methods, err = r.authMethod.auth(session, user, c, rand)
if ok || err != nil { // either success or error terminate
return ok, methods, err
}
}
return ok, methods, err
}
func (r *retryableAuthMethod) method() string {
return r.authMethod.method()
}
// RetryableAuthMethod is a decorator for other auth methods enabling them to
// be retried up to maxTries before considering that AuthMethod itself failed.
// If maxTries is <= 0, it will retry indefinitely.
//
// This is useful for interactive clients using challenge/response type
// authentication (e.g. Keyboard-Interactive, Password, etc) where the user
// could mistype their response resulting in the server issuing a
// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4
// [keyboard-interactive]); Without this decorator, the non-retryable
// AuthMethod would be removed from future consideration, and never tried again
// (and so the user would never be able to retry their entry).
func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
}
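// retryableKeyboardInteractive is an illustrative sketch (hypothetical helper,
// not upstream code) of the decorator above: a keyboard-interactive prompt is
// given three attempts before the method is abandoned.
func retryableKeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
	return RetryableAuthMethod(KeyboardInteractive(challenge), 3)
}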

vendor/golang.org/x/crypto/ssh/common.go generated vendored Normal file
@@ -0,0 +1,371 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto"
"crypto/rand"
"fmt"
"io"
"sync"
_ "crypto/sha1"
_ "crypto/sha256"
_ "crypto/sha512"
)
// These are string constants in the SSH protocol.
const (
compressionNone = "none"
serviceUserAuth = "ssh-userauth"
serviceSSH = "ssh-connection"
)
// supportedCiphers specifies the supported ciphers in preference order.
var supportedCiphers = []string{
"aes128-ctr", "aes192-ctr", "aes256-ctr",
"aes128-gcm@openssh.com",
"arcfour256", "arcfour128",
}
// supportedKexAlgos specifies the supported key-exchange algorithms in
// preference order.
var supportedKexAlgos = []string{
kexAlgoCurve25519SHA256,
// P384 and P521 are not constant-time yet, but since we don't
// reuse ephemeral keys, using them for ECDH should be OK.
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
kexAlgoDH14SHA1, kexAlgoDH1SHA1,
}
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
// of authenticating servers) in preference order.
var supportedHostKeyAlgos = []string{
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
KeyAlgoRSA, KeyAlgoDSA,
KeyAlgoED25519,
}
// supportedMACs specifies a default set of MAC algorithms in preference order.
// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
// because they have reached the end of their useful life.
var supportedMACs = []string{
"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
}
var supportedCompressions = []string{compressionNone}
// hashFuncs keeps the mapping of supported algorithms to their respective
// hashes needed for signature verification.
var hashFuncs = map[string]crypto.Hash{
KeyAlgoRSA: crypto.SHA1,
KeyAlgoDSA: crypto.SHA1,
KeyAlgoECDSA256: crypto.SHA256,
KeyAlgoECDSA384: crypto.SHA384,
KeyAlgoECDSA521: crypto.SHA512,
CertAlgoRSAv01: crypto.SHA1,
CertAlgoDSAv01: crypto.SHA1,
CertAlgoECDSA256v01: crypto.SHA256,
CertAlgoECDSA384v01: crypto.SHA384,
CertAlgoECDSA521v01: crypto.SHA512,
}
// unexpectedMessageError results when the SSH message that we received didn't
// match what we wanted.
func unexpectedMessageError(expected, got uint8) error {
return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
}
// parseError results from a malformed SSH message.
func parseError(tag uint8) error {
return fmt.Errorf("ssh: parse error in message type %d", tag)
}
func findCommon(what string, client []string, server []string) (common string, err error) {
for _, c := range client {
for _, s := range server {
if c == s {
return c, nil
}
}
}
return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
}
type directionAlgorithms struct {
Cipher string
MAC string
Compression string
}
// rekeyBytes returns the rekeying interval in bytes.
func (a *directionAlgorithms) rekeyBytes() int64 {
// According to RFC4344 block ciphers should rekey after
// 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
// 128.
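// That works out to 2^32 blocks * 16 bytes per block = 2^36 bytes
// (64 GiB) before a rekey is forced.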
switch a.Cipher {
case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
return 16 * (1 << 32)
}
// For others, stick with RFC4253 recommendation to rekey after 1 Gb of data.
return 1 << 30
}
type algorithms struct {
kex string
hostKey string
w directionAlgorithms
r directionAlgorithms
}
func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
result := &algorithms{}
result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
if err != nil {
return
}
result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
if err != nil {
return
}
result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
if err != nil {
return
}
result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
if err != nil {
return
}
result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
if err != nil {
return
}
result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
if err != nil {
return
}
result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
if err != nil {
return
}
result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
if err != nil {
return
}
return result, nil
}
// If the rekey threshold is too small, we can't make any progress
// sending data.
const minRekeyThreshold uint64 = 256
// Config contains configuration data common to both ServerConfig and
// ClientConfig.
type Config struct {
// Rand provides the source of entropy for cryptographic
// primitives. If Rand is nil, the cryptographic random reader
// in package crypto/rand will be used.
Rand io.Reader
// The maximum number of bytes sent or received after which a
// new key is negotiated. It must be at least 256. If
// unspecified, 1 gigabyte is used.
RekeyThreshold uint64
// The allowed key exchange algorithms. If unspecified then a
// default set of algorithms is used.
KeyExchanges []string
// The allowed cipher algorithms. If unspecified then a sensible
// default is used.
Ciphers []string
// The allowed MAC algorithms. If unspecified then a sensible default
// is used.
MACs []string
}
// SetDefaults sets sensible values for unset fields in config. This is
// exported for testing: Configs passed to SSH functions are copied and have
// default values set automatically.
func (c *Config) SetDefaults() {
if c.Rand == nil {
c.Rand = rand.Reader
}
if c.Ciphers == nil {
c.Ciphers = supportedCiphers
}
var ciphers []string
for _, c := range c.Ciphers {
if cipherModes[c] != nil {
// reject the cipher if we have no cipherModes definition
ciphers = append(ciphers, c)
}
}
c.Ciphers = ciphers
if c.KeyExchanges == nil {
c.KeyExchanges = supportedKexAlgos
}
if c.MACs == nil {
c.MACs = supportedMACs
}
if c.RekeyThreshold == 0 {
// RFC 4253, section 9 suggests rekeying after 1G.
c.RekeyThreshold = 1 << 30
}
if c.RekeyThreshold < minRekeyThreshold {
c.RekeyThreshold = minRekeyThreshold
}
}
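// Usage sketch (illustrative, not part of the vendored file): narrowing the
// embedded Config fields from a ClientConfig. Any field left unset is filled
// in by SetDefaults when the handshake starts. The credentials and algorithm
// choices are hypothetical.
package main

import (
	"fmt"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		Config: ssh.Config{
			KeyExchanges:   []string{"curve25519-sha256@libssh.org"},
			Ciphers:        []string{"aes128-ctr", "aes256-ctr"},
			MACs:           []string{"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256"},
			RekeyThreshold: 1 << 30, // rekey after 1 GiB, the RFC 4253 suggestion
		},
		User: "alice",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		// Accept any host key for this illustration; real code must verify it.
		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return nil
		},
	}
	fmt.Println("offering", len(config.Ciphers), "ciphers")
}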
// buildDataSignedForAuth returns the data that is signed in order to prove
// possession of a private key. See RFC 4252, section 7.
func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
data := struct {
Session []byte
Type byte
User string
Service string
Method string
Sign bool
Algo []byte
PubKey []byte
}{
sessionId,
msgUserAuthRequest,
req.User,
req.Service,
req.Method,
true,
algo,
pubKey,
}
return Marshal(data)
}
func appendU16(buf []byte, n uint16) []byte {
return append(buf, byte(n>>8), byte(n))
}
func appendU32(buf []byte, n uint32) []byte {
return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}
func appendU64(buf []byte, n uint64) []byte {
return append(buf,
byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
}
func appendInt(buf []byte, n int) []byte {
return appendU32(buf, uint32(n))
}
func appendString(buf []byte, s string) []byte {
buf = appendU32(buf, uint32(len(s)))
buf = append(buf, s...)
return buf
}
func appendBool(buf []byte, b bool) []byte {
if b {
return append(buf, 1)
}
return append(buf, 0)
}
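// Standalone sketch (illustrative, not part of the vendored file). The
// unexported helpers above implement the RFC 4253 wire encodings; this
// snippet reproduces the "string" encoding (big-endian uint32 length prefix
// followed by the raw bytes) so the byte layout is visible.
package main

import "fmt"

// sshString appends s to buf as an SSH "string" (RFC 4253, section 5):
// a 4-byte big-endian length followed by the bytes themselves.
func sshString(buf []byte, s string) []byte {
	n := uint32(len(s))
	buf = append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	return append(buf, s...)
}

func main() {
	fmt.Printf("% x\n", sshString(nil, "ssh-rsa"))
	// Prints: 00 00 00 07 73 73 68 2d 72 73 61
}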
// newCond is a helper to hide the fact that there is no usable zero
// value for sync.Cond.
func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) }
// window represents the buffer available to clients
// wishing to write to a channel.
type window struct {
*sync.Cond
win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
writeWaiters int
closed bool
}
// add adds win to the amount of window available
// for consumers.
func (w *window) add(win uint32) bool {
// A zero-sized window adjustment is a no-op.
if win == 0 {
return true
}
w.L.Lock()
if w.win+win < win {
w.L.Unlock()
return false
}
w.win += win
// It is unusual for multiple goroutines to be attempting to reserve
// window space at once, but it is not guaranteed. Use Broadcast to
// notify all waiters that additional window is available.
w.Broadcast()
w.L.Unlock()
return true
}
// close sets the window to closed, so all reservations fail
// immediately.
func (w *window) close() {
w.L.Lock()
w.closed = true
w.Broadcast()
w.L.Unlock()
}
// reserve reserves win from the available window capacity.
// If no capacity remains, reserve will block. reserve may
// return less than requested.
func (w *window) reserve(win uint32) (uint32, error) {
var err error
w.L.Lock()
w.writeWaiters++
w.Broadcast()
for w.win == 0 && !w.closed {
w.Wait()
}
w.writeWaiters--
if w.win < win {
win = w.win
}
w.win -= win
if w.closed {
err = io.EOF
}
w.L.Unlock()
return win, err
}
// waitWriterBlocked waits until some goroutine is blocked for further
// writes. It is used in tests only.
func (w *window) waitWriterBlocked() {
w.Cond.L.Lock()
for w.writeWaiters == 0 {
w.Cond.Wait()
}
w.Cond.L.Unlock()
}

143
vendor/golang.org/x/crypto/ssh/connection.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"fmt"
"net"
)
// OpenChannelError is returned if the other side rejects an
// OpenChannel request.
type OpenChannelError struct {
Reason RejectionReason
Message string
}
func (e *OpenChannelError) Error() string {
return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
}
// ConnMetadata holds metadata for the connection.
type ConnMetadata interface {
// User returns the user ID for this connection.
User() string
// SessionID returns the session hash, also denoted by H.
SessionID() []byte
// ClientVersion returns the client's version string as hashed
// into the session ID.
ClientVersion() []byte
// ServerVersion returns the server's version string as hashed
// into the session ID.
ServerVersion() []byte
// RemoteAddr returns the remote address for this connection.
RemoteAddr() net.Addr
// LocalAddr returns the local address for this connection.
LocalAddr() net.Addr
}
// Conn represents an SSH connection for both server and client roles.
// Conn is the basis for implementing an application layer, such
// as ClientConn, which implements the traditional shell access for
// clients.
type Conn interface {
ConnMetadata
// SendRequest sends a global request, and returns the
// reply. If wantReply is true, it returns the response status
// and payload. See also RFC4254, section 4.
SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
// OpenChannel tries to open a channel. If the request is
// rejected, it returns *OpenChannelError. On success it returns
// the SSH Channel and a Go channel for incoming, out-of-band
// requests. The Go channel must be serviced, or the
// connection will hang.
OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
// Close closes the underlying network connection
Close() error
// Wait blocks until the connection has shut down, and returns the
// error causing the shutdown.
Wait() error
// TODO(hanwen): consider exposing:
// RequestKeyChange
// Disconnect
}
// DiscardRequests consumes and rejects all requests from the
// passed-in channel.
func DiscardRequests(in <-chan *Request) {
for req := range in {
if req.WantReply {
req.Reply(false, nil)
}
}
}
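// Usage sketch (illustrative, not part of the vendored file): the request
// channel returned by OpenChannel must be serviced, and DiscardRequests is
// the usual way to do that when the out-of-band requests are not interesting.
// The host and credentials are hypothetical.
package main

import (
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "alice",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		// Accept any host key for this illustration; real code must verify it.
		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return nil
		},
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Open a raw channel and throw away its out-of-band requests.
	ch, reqs, err := client.OpenChannel("session", nil)
	if err != nil {
		log.Fatal(err)
	}
	go ssh.DiscardRequests(reqs)
	defer ch.Close()
}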
// A connection represents an incoming connection.
type connection struct {
transport *handshakeTransport
sshConn
// The connection protocol.
*mux
}
func (c *connection) Close() error {
return c.sshConn.conn.Close()
}
// sshConn provides net.Conn metadata, but disallows direct reads and
// writes.
type sshConn struct {
conn net.Conn
user string
sessionID []byte
clientVersion []byte
serverVersion []byte
}
func dup(src []byte) []byte {
dst := make([]byte, len(src))
copy(dst, src)
return dst
}
func (c *sshConn) User() string {
return c.user
}
func (c *sshConn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
func (c *sshConn) Close() error {
return c.conn.Close()
}
func (c *sshConn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
func (c *sshConn) SessionID() []byte {
return dup(c.sessionID)
}
func (c *sshConn) ClientVersion() []byte {
return dup(c.clientVersion)
}
func (c *sshConn) ServerVersion() []byte {
return dup(c.serverVersion)
}

21
vendor/golang.org/x/crypto/ssh/doc.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package ssh implements an SSH client and server.
SSH is a transport security protocol, an authentication protocol and a
family of application protocols. The most typical application-level
protocol is a remote shell, which this package implements directly. However,
the multiplexed nature of SSH is exposed to users who wish to support
other protocols.
References:
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.
*/
package ssh // import "golang.org/x/crypto/ssh"
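// Usage sketch (illustrative, not part of the vendored file): a minimal
// client in the spirit of the package documentation above — dial, open a
// session and run a command. The address, credentials and command are
// hypothetical, and real code must verify the host key rather than accept it.
package main

import (
	"bytes"
	"fmt"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "alice",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		// Accept any host key for this illustration; real code must verify it.
		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return nil
		},
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal("dial: ", err)
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		log.Fatal("session: ", err)
	}
	defer session.Close()

	var out bytes.Buffer
	session.Stdout = &out
	if err := session.Run("/usr/bin/whoami"); err != nil {
		log.Fatal("run: ", err)
	}
	fmt.Print(out.String())
}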

625
vendor/golang.org/x/crypto/ssh/handshake.go generated vendored Normal file
View File

@ -0,0 +1,625 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto/rand"
"errors"
"fmt"
"io"
"log"
"net"
"sync"
)
// debugHandshake, if set, prints messages sent and received. Key
// exchange messages are printed as if DH were used, so the debug
// messages are wrong when using ECDH.
const debugHandshake = false
// chanSize sets the amount of buffering in SSH connections. This is
// primarily for testing: setting chanSize=0 uncovers deadlocks more
// quickly.
const chanSize = 16
// keyingTransport is a packet based transport that supports key
// changes. It need not be thread-safe. It should pass through
// msgNewKeys in both directions.
type keyingTransport interface {
packetConn
// prepareKeyChange sets up a key change. The key change for a
// direction will be effected if a msgNewKeys message is sent
// or received.
prepareKeyChange(*algorithms, *kexResult) error
}
// handshakeTransport implements rekeying on top of a keyingTransport
// and offers a thread-safe writePacket() interface.
type handshakeTransport struct {
conn keyingTransport
config *Config
serverVersion []byte
clientVersion []byte
// hostKeys is non-empty if we are the server. In that case,
// it contains all host keys that can be used to sign the
// connection.
hostKeys []Signer
// hostKeyAlgorithms is non-empty if we are the client. In that case,
// we accept these key types from the server as host key.
hostKeyAlgorithms []string
// On read error, incoming is closed, and readError is set.
incoming chan []byte
readError error
mu sync.Mutex
writeError error
sentInitPacket []byte
sentInitMsg *kexInitMsg
pendingPackets [][]byte // Used when a key exchange is in progress.
// If the read loop wants to schedule a kex, it pings this
// channel, and the write loop will send out a kex
// message.
requestKex chan struct{}
// If the other side requests or confirms a kex, its kexInit
// packet is sent here for the write loop to find it.
startKex chan *pendingKex
// data for host key checking
hostKeyCallback HostKeyCallback
dialAddress string
remoteAddr net.Addr
// Algorithms agreed in the last key exchange.
algorithms *algorithms
readPacketsLeft uint32
readBytesLeft int64
writePacketsLeft uint32
writeBytesLeft int64
// The session ID or nil if first kex did not complete yet.
sessionID []byte
}
type pendingKex struct {
otherInit []byte
done chan error
}
func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
t := &handshakeTransport{
conn: conn,
serverVersion: serverVersion,
clientVersion: clientVersion,
incoming: make(chan []byte, chanSize),
requestKex: make(chan struct{}, 1),
startKex: make(chan *pendingKex, 1),
config: config,
}
// We always start with a mandatory key exchange.
t.requestKex <- struct{}{}
return t
}
func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport {
t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
t.dialAddress = dialAddr
t.remoteAddr = addr
t.hostKeyCallback = config.HostKeyCallback
if config.HostKeyAlgorithms != nil {
t.hostKeyAlgorithms = config.HostKeyAlgorithms
} else {
t.hostKeyAlgorithms = supportedHostKeyAlgos
}
go t.readLoop()
go t.kexLoop()
return t
}
func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
t.hostKeys = config.hostKeys
go t.readLoop()
go t.kexLoop()
return t
}
func (t *handshakeTransport) getSessionID() []byte {
return t.sessionID
}
// waitSession waits for the session to be established. This should be
// the first thing to call after instantiating handshakeTransport.
func (t *handshakeTransport) waitSession() error {
p, err := t.readPacket()
if err != nil {
return err
}
if p[0] != msgNewKeys {
return fmt.Errorf("ssh: first packet should be msgNewKeys")
}
return nil
}
func (t *handshakeTransport) id() string {
if len(t.hostKeys) > 0 {
return "server"
}
return "client"
}
func (t *handshakeTransport) printPacket(p []byte, write bool) {
action := "got"
if write {
action = "sent"
}
if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p))
} else {
msg, err := decode(p)
log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err)
}
}
func (t *handshakeTransport) readPacket() ([]byte, error) {
p, ok := <-t.incoming
if !ok {
return nil, t.readError
}
return p, nil
}
func (t *handshakeTransport) readLoop() {
first := true
for {
p, err := t.readOnePacket(first)
first = false
if err != nil {
t.readError = err
close(t.incoming)
break
}
if p[0] == msgIgnore || p[0] == msgDebug {
continue
}
t.incoming <- p
}
// Stop writers too.
t.recordWriteError(t.readError)
// Unblock the writer should it wait for this.
close(t.startKex)
// Don't close t.requestKex; it's also written to from writePacket.
}
func (t *handshakeTransport) pushPacket(p []byte) error {
if debugHandshake {
t.printPacket(p, true)
}
return t.conn.writePacket(p)
}
func (t *handshakeTransport) getWriteError() error {
t.mu.Lock()
defer t.mu.Unlock()
return t.writeError
}
func (t *handshakeTransport) recordWriteError(err error) {
t.mu.Lock()
defer t.mu.Unlock()
if t.writeError == nil && err != nil {
t.writeError = err
}
}
func (t *handshakeTransport) requestKeyExchange() {
select {
case t.requestKex <- struct{}{}:
default:
// something already requested a kex, so do nothing.
}
}
func (t *handshakeTransport) kexLoop() {
write:
for t.getWriteError() == nil {
var request *pendingKex
var sent bool
for request == nil || !sent {
var ok bool
select {
case request, ok = <-t.startKex:
if !ok {
break write
}
case <-t.requestKex:
break
}
if !sent {
if err := t.sendKexInit(); err != nil {
t.recordWriteError(err)
break
}
sent = true
}
}
if err := t.getWriteError(); err != nil {
if request != nil {
request.done <- err
}
break
}
// We're not servicing t.requestKex, but that is OK:
// we never block on sending to t.requestKex.
// We're not servicing t.startKex, but the remote end
// has just sent us a kexInitMsg, so it can't send
// another key change request until we close the done
// channel on the pendingKex request.
err := t.enterKeyExchange(request.otherInit)
t.mu.Lock()
t.writeError = err
t.sentInitPacket = nil
t.sentInitMsg = nil
t.writePacketsLeft = packetRekeyThreshold
if t.config.RekeyThreshold > 0 {
t.writeBytesLeft = int64(t.config.RekeyThreshold)
} else if t.algorithms != nil {
t.writeBytesLeft = t.algorithms.w.rekeyBytes()
}
// we have completed the key exchange. Since the
// reader is still blocked, it is safe to clear out
// the requestKex channel. This avoids the situation
// where: 1) we consumed our own request for the
// initial kex, and 2) the kex from the remote side
// caused another send on the requestKex channel.
clear:
for {
select {
case <-t.requestKex:
//
default:
break clear
}
}
request.done <- t.writeError
// kex finished. Push packets that we received while
// the kex was in progress. Don't look at t.startKex
// and don't increment writtenSinceKex: if we trigger
// another kex while we are still busy with the last
// one, things will become very confusing.
for _, p := range t.pendingPackets {
t.writeError = t.pushPacket(p)
if t.writeError != nil {
break
}
}
t.pendingPackets = t.pendingPackets[:0]
t.mu.Unlock()
}
// drain startKex channel. We don't service t.requestKex
// because nobody does blocking sends there.
go func() {
for init := range t.startKex {
init.done <- t.writeError
}
}()
// Unblock reader.
t.conn.Close()
}
// The protocol uses uint32 for packet counters, so we can't let them
// reach 1<<32. We will actually read and write more packets than
// this, though: the other side may send more packets, and after we
// hit this limit on writing we will send a few more packets for the
// key exchange itself.
const packetRekeyThreshold = (1 << 31)
func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
p, err := t.conn.readPacket()
if err != nil {
return nil, err
}
if t.readPacketsLeft > 0 {
t.readPacketsLeft--
} else {
t.requestKeyExchange()
}
if t.readBytesLeft > 0 {
t.readBytesLeft -= int64(len(p))
} else {
t.requestKeyExchange()
}
if debugHandshake {
t.printPacket(p, false)
}
if first && p[0] != msgKexInit {
return nil, fmt.Errorf("ssh: first packet should be msgKexInit")
}
if p[0] != msgKexInit {
return p, nil
}
firstKex := t.sessionID == nil
kex := pendingKex{
done: make(chan error, 1),
otherInit: p,
}
t.startKex <- &kex
err = <-kex.done
if debugHandshake {
log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err)
}
if err != nil {
return nil, err
}
t.readPacketsLeft = packetRekeyThreshold
if t.config.RekeyThreshold > 0 {
t.readBytesLeft = int64(t.config.RekeyThreshold)
} else {
t.readBytesLeft = t.algorithms.r.rekeyBytes()
}
// By default, a key exchange is hidden from higher layers by
// translating it into msgIgnore.
successPacket := []byte{msgIgnore}
if firstKex {
// sendKexInit() for the first kex waits for
// msgNewKeys so the authentication process is
// guaranteed to happen over an encrypted transport.
successPacket = []byte{msgNewKeys}
}
return successPacket, nil
}
// sendKexInit sends a key change message.
func (t *handshakeTransport) sendKexInit() error {
t.mu.Lock()
defer t.mu.Unlock()
if t.sentInitMsg != nil {
// kexInits may be sent either in response to the other side,
// or because our side wants to initiate a key change, so we
// may have already sent a kexInit. In that case, don't send a
// second kexInit.
return nil
}
msg := &kexInitMsg{
KexAlgos: t.config.KeyExchanges,
CiphersClientServer: t.config.Ciphers,
CiphersServerClient: t.config.Ciphers,
MACsClientServer: t.config.MACs,
MACsServerClient: t.config.MACs,
CompressionClientServer: supportedCompressions,
CompressionServerClient: supportedCompressions,
}
io.ReadFull(rand.Reader, msg.Cookie[:])
if len(t.hostKeys) > 0 {
for _, k := range t.hostKeys {
msg.ServerHostKeyAlgos = append(
msg.ServerHostKeyAlgos, k.PublicKey().Type())
}
} else {
msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
}
packet := Marshal(msg)
// writePacket destroys the contents, so save a copy.
packetCopy := make([]byte, len(packet))
copy(packetCopy, packet)
if err := t.pushPacket(packetCopy); err != nil {
return err
}
t.sentInitMsg = msg
t.sentInitPacket = packet
return nil
}
func (t *handshakeTransport) writePacket(p []byte) error {
switch p[0] {
case msgKexInit:
return errors.New("ssh: only handshakeTransport can send kexInit")
case msgNewKeys:
return errors.New("ssh: only handshakeTransport can send newKeys")
}
t.mu.Lock()
defer t.mu.Unlock()
if t.writeError != nil {
return t.writeError
}
if t.sentInitMsg != nil {
// Copy the packet so the writer can reuse the buffer.
cp := make([]byte, len(p))
copy(cp, p)
t.pendingPackets = append(t.pendingPackets, cp)
return nil
}
if t.writeBytesLeft > 0 {
t.writeBytesLeft -= int64(len(p))
} else {
t.requestKeyExchange()
}
if t.writePacketsLeft > 0 {
t.writePacketsLeft--
} else {
t.requestKeyExchange()
}
if err := t.pushPacket(p); err != nil {
t.writeError = err
}
return nil
}
func (t *handshakeTransport) Close() error {
return t.conn.Close()
}
func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
if debugHandshake {
log.Printf("%s entered key exchange", t.id())
}
otherInit := &kexInitMsg{}
if err := Unmarshal(otherInitPacket, otherInit); err != nil {
return err
}
magics := handshakeMagics{
clientVersion: t.clientVersion,
serverVersion: t.serverVersion,
clientKexInit: otherInitPacket,
serverKexInit: t.sentInitPacket,
}
clientInit := otherInit
serverInit := t.sentInitMsg
if len(t.hostKeys) == 0 {
clientInit, serverInit = serverInit, clientInit
magics.clientKexInit = t.sentInitPacket
magics.serverKexInit = otherInitPacket
}
var err error
t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit)
if err != nil {
return err
}
// We don't send FirstKexFollows, but we handle receiving it.
//
// RFC 4253 section 7 defines the kex and the agreement method for
// first_kex_packet_follows. It states that the guessed packet
// should be ignored if the "kex algorithm and/or the host
// key algorithm is guessed wrong (server and client have
// different preferred algorithm), or if any of the other
// algorithms cannot be agreed upon". The other algorithms have
// already been checked above so the kex algorithm and host key
// algorithm are checked here.
if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) {
// other side sent a kex message for the wrong algorithm,
// which we have to ignore.
if _, err := t.conn.readPacket(); err != nil {
return err
}
}
kex, ok := kexAlgoMap[t.algorithms.kex]
if !ok {
return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex)
}
var result *kexResult
if len(t.hostKeys) > 0 {
result, err = t.server(kex, t.algorithms, &magics)
} else {
result, err = t.client(kex, t.algorithms, &magics)
}
if err != nil {
return err
}
if t.sessionID == nil {
t.sessionID = result.H
}
result.SessionID = t.sessionID
if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil {
return err
}
if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
return err
}
if packet, err := t.conn.readPacket(); err != nil {
return err
} else if packet[0] != msgNewKeys {
return unexpectedMessageError(msgNewKeys, packet[0])
}
return nil
}
func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
var hostKey Signer
for _, k := range t.hostKeys {
if algs.hostKey == k.PublicKey().Type() {
hostKey = k
}
}
r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
return r, err
}
func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
result, err := kex.Client(t.conn, t.config.Rand, magics)
if err != nil {
return nil, err
}
hostKey, err := ParsePublicKey(result.HostKey)
if err != nil {
return nil, err
}
if err := verifyHostKeySignature(hostKey, result); err != nil {
return nil, err
}
err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
if err != nil {
return nil, err
}
return result, nil
}

540
vendor/golang.org/x/crypto/ssh/kex.go generated vendored Normal file
View File

@ -0,0 +1,540 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/subtle"
"errors"
"io"
"math/big"
"golang.org/x/crypto/curve25519"
)
const (
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
kexAlgoECDH256 = "ecdh-sha2-nistp256"
kexAlgoECDH384 = "ecdh-sha2-nistp384"
kexAlgoECDH521 = "ecdh-sha2-nistp521"
kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
)
// kexResult captures the outcome of a key exchange.
type kexResult struct {
// Session hash. See also RFC 4253, section 8.
H []byte
// Shared secret. See also RFC 4253, section 8.
K []byte
// Host key as hashed into H.
HostKey []byte
// Signature of H.
Signature []byte
// A cryptographic hash function that matches the security
// level of the key exchange algorithm. It is used for
// calculating H, and for deriving keys from H and K.
Hash crypto.Hash
// The session ID, which is the first H computed. This is used
// to derive key material inside the transport.
SessionID []byte
}
// handshakeMagics contains data that is always included in the
// session hash.
type handshakeMagics struct {
clientVersion, serverVersion []byte
clientKexInit, serverKexInit []byte
}
func (m *handshakeMagics) write(w io.Writer) {
writeString(w, m.clientVersion)
writeString(w, m.serverVersion)
writeString(w, m.clientKexInit)
writeString(w, m.serverKexInit)
}
// kexAlgorithm abstracts different key exchange algorithms.
type kexAlgorithm interface {
// Server runs server-side key agreement, signing the result
// with a hostkey.
Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
// Client runs the client-side key agreement. Caller is
// responsible for verifying the host key signature.
Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
}
// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
type dhGroup struct {
g, p, pMinus1 *big.Int
}
func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 {
return nil, errors.New("ssh: DH parameter out of bounds")
}
return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
}
func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
hashFunc := crypto.SHA1
var x *big.Int
for {
var err error
if x, err = rand.Int(randSource, group.pMinus1); err != nil {
return nil, err
}
if x.Sign() > 0 {
break
}
}
X := new(big.Int).Exp(group.g, x, group.p)
kexDHInit := kexDHInitMsg{
X: X,
}
if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var kexDHReply kexDHReplyMsg
if err = Unmarshal(packet, &kexDHReply); err != nil {
return nil, err
}
kInt, err := group.diffieHellman(kexDHReply.Y, x)
if err != nil {
return nil, err
}
h := hashFunc.New()
magics.write(h)
writeString(h, kexDHReply.HostKey)
writeInt(h, X)
writeInt(h, kexDHReply.Y)
K := make([]byte, intLength(kInt))
marshalInt(K, kInt)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: kexDHReply.HostKey,
Signature: kexDHReply.Signature,
Hash: crypto.SHA1,
}, nil
}
func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
hashFunc := crypto.SHA1
packet, err := c.readPacket()
if err != nil {
return
}
var kexDHInit kexDHInitMsg
if err = Unmarshal(packet, &kexDHInit); err != nil {
return
}
var y *big.Int
for {
if y, err = rand.Int(randSource, group.pMinus1); err != nil {
return
}
if y.Sign() > 0 {
break
}
}
Y := new(big.Int).Exp(group.g, y, group.p)
kInt, err := group.diffieHellman(kexDHInit.X, y)
if err != nil {
return nil, err
}
hostKeyBytes := priv.PublicKey().Marshal()
h := hashFunc.New()
magics.write(h)
writeString(h, hostKeyBytes)
writeInt(h, kexDHInit.X)
writeInt(h, Y)
K := make([]byte, intLength(kInt))
marshalInt(K, kInt)
h.Write(K)
H := h.Sum(nil)
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, randSource, H)
if err != nil {
return nil, err
}
kexDHReply := kexDHReplyMsg{
HostKey: hostKeyBytes,
Y: Y,
Signature: sig,
}
packet = Marshal(&kexDHReply)
err = c.writePacket(packet)
return &kexResult{
H: H,
K: K,
HostKey: hostKeyBytes,
Signature: sig,
Hash: crypto.SHA1,
}, nil
}
// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
// described in RFC 5656, section 4.
type ecdh struct {
curve elliptic.Curve
}
func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
if err != nil {
return nil, err
}
kexInit := kexECDHInitMsg{
ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
}
serialized := Marshal(&kexInit)
if err := c.writePacket(serialized); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var reply kexECDHReplyMsg
if err = Unmarshal(packet, &reply); err != nil {
return nil, err
}
x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
if err != nil {
return nil, err
}
// generate shared secret
secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
h := ecHash(kex.curve).New()
magics.write(h)
writeString(h, reply.HostKey)
writeString(h, kexInit.ClientPubKey)
writeString(h, reply.EphemeralPubKey)
K := make([]byte, intLength(secret))
marshalInt(K, secret)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: reply.HostKey,
Signature: reply.Signature,
Hash: ecHash(kex.curve),
}, nil
}
// unmarshalECKey parses and checks an EC key.
func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
x, y = elliptic.Unmarshal(curve, pubkey)
if x == nil {
return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
}
if !validateECPublicKey(curve, x, y) {
return nil, nil, errors.New("ssh: public key not on curve")
}
return x, y, nil
}
// validateECPublicKey checks that the point is a valid public key for
// the given curve. See [SEC1], 3.2.2
func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
if x.Sign() == 0 && y.Sign() == 0 {
return false
}
if x.Cmp(curve.Params().P) >= 0 {
return false
}
if y.Cmp(curve.Params().P) >= 0 {
return false
}
if !curve.IsOnCurve(x, y) {
return false
}
// We don't check if N * PubKey == 0, since
//
// - the NIST curves have cofactor = 1, so this is implicit.
// (We don't foresee an implementation that supports non NIST
// curves)
//
// - for ephemeral keys, we don't need to worry about small
// subgroup attacks.
return true
}
func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var kexECDHInit kexECDHInitMsg
if err = Unmarshal(packet, &kexECDHInit); err != nil {
return nil, err
}
clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
if err != nil {
return nil, err
}
// We could cache this key across multiple users/multiple
// connection attempts, but the benefit is small. OpenSSH
// generates a new key for each incoming connection.
ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
if err != nil {
return nil, err
}
hostKeyBytes := priv.PublicKey().Marshal()
serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
// generate shared secret
secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
h := ecHash(kex.curve).New()
magics.write(h)
writeString(h, hostKeyBytes)
writeString(h, kexECDHInit.ClientPubKey)
writeString(h, serializedEphKey)
K := make([]byte, intLength(secret))
marshalInt(K, secret)
h.Write(K)
H := h.Sum(nil)
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, rand, H)
if err != nil {
return nil, err
}
reply := kexECDHReplyMsg{
EphemeralPubKey: serializedEphKey,
HostKey: hostKeyBytes,
Signature: sig,
}
serialized := Marshal(&reply)
if err := c.writePacket(serialized); err != nil {
return nil, err
}
return &kexResult{
H: H,
K: K,
HostKey: reply.HostKey,
Signature: sig,
Hash: ecHash(kex.curve),
}, nil
}
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
// This is the group called diffie-hellman-group1-sha1 in RFC
// 4253 and Oakley Group 2 in RFC 2409.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
}
// This is the group called diffie-hellman-group14-sha1 in RFC
// 4253 and Oakley Group 14 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
}
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
}
// curve25519sha256 implements the curve25519-sha256@libssh.org key
// agreement protocol, as described in
// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
type curve25519sha256 struct{}
type curve25519KeyPair struct {
priv [32]byte
pub [32]byte
}
func (kp *curve25519KeyPair) generate(rand io.Reader) error {
if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
return err
}
curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
return nil
}
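// Standalone sketch (illustrative, not part of the vendored file). The key
// pair above is the standard X25519 construction: each side derives the same
// shared secret from its own private scalar and the peer's public point.
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"
	"log"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var aPriv, aPub, bPriv, bPub [32]byte
	if _, err := io.ReadFull(rand.Reader, aPriv[:]); err != nil {
		log.Fatal(err)
	}
	if _, err := io.ReadFull(rand.Reader, bPriv[:]); err != nil {
		log.Fatal(err)
	}
	curve25519.ScalarBaseMult(&aPub, &aPriv)
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	var s1, s2 [32]byte
	curve25519.ScalarMult(&s1, &aPriv, &bPub) // A's view of the shared secret
	curve25519.ScalarMult(&s2, &bPriv, &aPub) // B's view of the shared secret
	fmt.Println("secrets match:", bytes.Equal(s1[:], s2[:]))
}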
// curve25519Zeros is just an array of 32 zero bytes so that we have something
// convenient to compare against in order to reject curve25519 points with the
// wrong order.
var curve25519Zeros [32]byte
func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
var kp curve25519KeyPair
if err := kp.generate(rand); err != nil {
return nil, err
}
if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
return nil, err
}
packet, err := c.readPacket()
if err != nil {
return nil, err
}
var reply kexECDHReplyMsg
if err = Unmarshal(packet, &reply); err != nil {
return nil, err
}
if len(reply.EphemeralPubKey) != 32 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
}
var servPub, secret [32]byte
copy(servPub[:], reply.EphemeralPubKey)
curve25519.ScalarMult(&secret, &kp.priv, &servPub)
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
}
h := crypto.SHA256.New()
magics.write(h)
writeString(h, reply.HostKey)
writeString(h, kp.pub[:])
writeString(h, reply.EphemeralPubKey)
kInt := new(big.Int).SetBytes(secret[:])
K := make([]byte, intLength(kInt))
marshalInt(K, kInt)
h.Write(K)
return &kexResult{
H: h.Sum(nil),
K: K,
HostKey: reply.HostKey,
Signature: reply.Signature,
Hash: crypto.SHA256,
}, nil
}
func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return
}
var kexInit kexECDHInitMsg
if err = Unmarshal(packet, &kexInit); err != nil {
return
}
if len(kexInit.ClientPubKey) != 32 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
}
var kp curve25519KeyPair
if err := kp.generate(rand); err != nil {
return nil, err
}
var clientPub, secret [32]byte
copy(clientPub[:], kexInit.ClientPubKey)
curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
}
hostKeyBytes := priv.PublicKey().Marshal()
h := crypto.SHA256.New()
magics.write(h)
writeString(h, hostKeyBytes)
writeString(h, kexInit.ClientPubKey)
writeString(h, kp.pub[:])
kInt := new(big.Int).SetBytes(secret[:])
K := make([]byte, intLength(kInt))
marshalInt(K, kInt)
h.Write(K)
H := h.Sum(nil)
sig, err := signAndMarshal(priv, rand, H)
if err != nil {
return nil, err
}
reply := kexECDHReplyMsg{
EphemeralPubKey: kp.pub[:],
HostKey: hostKeyBytes,
Signature: sig,
}
if err := c.writePacket(Marshal(&reply)); err != nil {
return nil, err
}
return &kexResult{
H: H,
K: K,
HostKey: hostKeyBytes,
Signature: sig,
Hash: crypto.SHA256,
}, nil
}

905
vendor/golang.org/x/crypto/ssh/keys.go generated vendored Normal file
View File

@ -0,0 +1,905 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/md5"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/asn1"
"encoding/base64"
"encoding/hex"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
"strings"
"golang.org/x/crypto/ed25519"
)
// These constants represent the algorithm names for key types supported by this
// package.
const (
KeyAlgoRSA = "ssh-rsa"
KeyAlgoDSA = "ssh-dss"
KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
KeyAlgoED25519 = "ssh-ed25519"
)
// parsePubKey parses a public key of the given algorithm.
// Use ParsePublicKey for keys with prepended algorithm.
func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
switch algo {
case KeyAlgoRSA:
return parseRSA(in)
case KeyAlgoDSA:
return parseDSA(in)
case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
return parseECDSA(in)
case KeyAlgoED25519:
return parseED25519(in)
case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
cert, err := parseCert(in, certToPrivAlgo(algo))
if err != nil {
return nil, nil, err
}
return cert, nil, nil
}
return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
}
// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format
// (see sshd(8) manual page) once the options and key type fields have been
// removed.
func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
in = bytes.TrimSpace(in)
i := bytes.IndexAny(in, " \t")
if i == -1 {
i = len(in)
}
base64Key := in[:i]
key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
n, err := base64.StdEncoding.Decode(key, base64Key)
if err != nil {
return nil, "", err
}
key = key[:n]
out, err = ParsePublicKey(key)
if err != nil {
return nil, "", err
}
comment = string(bytes.TrimSpace(in[i:]))
return out, comment, nil
}
// ParseKnownHosts parses an entry in the format of the known_hosts file.
//
// The known_hosts format is documented in the sshd(8) manual page. This
// function will parse a single entry from in. On successful return, marker
// will contain the optional marker value (i.e. "cert-authority" or "revoked")
// or else be empty, hosts will contain the hosts that this entry matches,
// pubKey will contain the public key and comment will contain any trailing
// comment at the end of the line. See the sshd(8) manual page for the various
// forms that a host string can take.
//
// The unparsed remainder of the input will be returned in rest. This function
// can be called repeatedly to parse multiple entries.
//
// If no entries were found in the input then err will be io.EOF. Otherwise a
// non-nil err value indicates a parse error.
func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
for len(in) > 0 {
end := bytes.IndexByte(in, '\n')
if end != -1 {
rest = in[end+1:]
in = in[:end]
} else {
rest = nil
}
end = bytes.IndexByte(in, '\r')
if end != -1 {
in = in[:end]
}
in = bytes.TrimSpace(in)
if len(in) == 0 || in[0] == '#' {
in = rest
continue
}
i := bytes.IndexAny(in, " \t")
if i == -1 {
in = rest
continue
}
// Strip out the beginning of the known_host key.
// This is either an optional marker or a (set of) hostname(s).
keyFields := bytes.Fields(in)
if len(keyFields) < 3 || len(keyFields) > 5 {
return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
}
// keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
// list of hosts
marker := ""
if keyFields[0][0] == '@' {
marker = string(keyFields[0][1:])
keyFields = keyFields[1:]
}
hosts := string(keyFields[0])
// keyFields[1] contains the key type (e.g. "ssh-rsa").
// However, that information is duplicated inside the
// base64-encoded key and so is ignored here.
key := bytes.Join(keyFields[2:], []byte(" "))
if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
return "", nil, nil, "", nil, err
}
return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
}
return "", nil, nil, "", nil, io.EOF
}
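// Usage sketch (illustrative, not part of the vendored file): ParseKnownHosts
// returns one entry per call, so a whole known_hosts file is consumed in a
// loop until io.EOF. The file path is hypothetical.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	rest, err := ioutil.ReadFile("/home/alice/.ssh/known_hosts")
	if err != nil {
		log.Fatal(err)
	}
	for {
		marker, hosts, pubKey, _, next, err := ssh.ParseKnownHosts(rest)
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("marker=%q hosts=%v type=%s\n", marker, hosts, pubKey.Type())
		rest = next
	}
}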
// ParseAuthorizedKey parses a public key from an authorized_keys
// file used in OpenSSH according to the sshd(8) manual page.
func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
for len(in) > 0 {
end := bytes.IndexByte(in, '\n')
if end != -1 {
rest = in[end+1:]
in = in[:end]
} else {
rest = nil
}
end = bytes.IndexByte(in, '\r')
if end != -1 {
in = in[:end]
}
in = bytes.TrimSpace(in)
if len(in) == 0 || in[0] == '#' {
in = rest
continue
}
i := bytes.IndexAny(in, " \t")
if i == -1 {
in = rest
continue
}
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
return out, comment, options, rest, nil
}
// No key type recognised. Maybe there's an options field at
// the beginning.
var b byte
inQuote := false
var candidateOptions []string
optionStart := 0
for i, b = range in {
isEnd := !inQuote && (b == ' ' || b == '\t')
if (b == ',' && !inQuote) || isEnd {
if i-optionStart > 0 {
candidateOptions = append(candidateOptions, string(in[optionStart:i]))
}
optionStart = i + 1
}
if isEnd {
break
}
if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) {
inQuote = !inQuote
}
}
for i < len(in) && (in[i] == ' ' || in[i] == '\t') {
i++
}
if i == len(in) {
// Invalid line: unmatched quote
in = rest
continue
}
in = in[i:]
i = bytes.IndexAny(in, " \t")
if i == -1 {
in = rest
continue
}
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
options = candidateOptions
return out, comment, options, rest, nil
}
in = rest
continue
}
return nil, "", nil, nil, errors.New("ssh: no key found")
}
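// Usage sketch (illustrative, not part of the vendored file): walking an
// authorized_keys file entry by entry via the rest return value. The file
// path is hypothetical.
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	in, err := ioutil.ReadFile("/home/alice/.ssh/authorized_keys")
	if err != nil {
		log.Fatal(err)
	}
	for len(in) > 0 {
		pubKey, comment, options, rest, err := ssh.ParseAuthorizedKey(in)
		if err != nil {
			break // no further keys (or a malformed line)
		}
		fmt.Printf("%s %q options=%v\n", pubKey.Type(), comment, options)
		in = rest
	}
}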
// ParsePublicKey parses an SSH public key formatted for use in
// the SSH wire protocol according to RFC 4253, section 6.6.
func ParsePublicKey(in []byte) (out PublicKey, err error) {
algo, in, ok := parseString(in)
if !ok {
return nil, errShortRead
}
var rest []byte
out, rest, err = parsePubKey(in, string(algo))
if len(rest) > 0 {
return nil, errors.New("ssh: trailing junk in public key")
}
return out, err
}
// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH
// authorized_keys file. The return value ends with newline.
func MarshalAuthorizedKey(key PublicKey) []byte {
b := &bytes.Buffer{}
b.WriteString(key.Type())
b.WriteByte(' ')
e := base64.NewEncoder(base64.StdEncoding, b)
e.Write(key.Marshal())
e.Close()
b.WriteByte('\n')
return b.Bytes()
}
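// Usage sketch (illustrative, not part of the vendored file): round-tripping
// a freshly generated key through the SSH wire format (Marshal/ParsePublicKey)
// and printing its one-line authorized_keys form (MarshalAuthorizedKey).
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	pub, err := ssh.NewPublicKey(&priv.PublicKey)
	if err != nil {
		log.Fatal(err)
	}

	// Wire format (RFC 4253, section 6.6) and back again.
	again, err := ssh.ParsePublicKey(pub.Marshal())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(again.Type()) // ecdsa-sha2-nistp256

	// One-line authorized_keys representation, newline included.
	fmt.Print(string(ssh.MarshalAuthorizedKey(pub)))
}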
// PublicKey is an abstraction of different types of public keys.
type PublicKey interface {
// Type returns the key's type, e.g. "ssh-rsa".
Type() string
// Marshal returns the serialized key data in SSH wire format,
// with the name prefix.
Marshal() []byte
// Verify that sig is a signature on the given data using this
// key. This function will hash the data appropriately first.
Verify(data []byte, sig *Signature) error
}
// CryptoPublicKey, if implemented by a PublicKey,
// returns the underlying crypto.PublicKey form of the key.
type CryptoPublicKey interface {
CryptoPublicKey() crypto.PublicKey
}
// A Signer can create signatures that verify against a public key.
type Signer interface {
// PublicKey returns an associated PublicKey instance.
PublicKey() PublicKey
// Sign returns raw signature for the given data. This method
// will apply the hash specified for the keytype to the data.
Sign(rand io.Reader, data []byte) (*Signature, error)
}
type rsaPublicKey rsa.PublicKey
func (r *rsaPublicKey) Type() string {
return "ssh-rsa"
}
// parseRSA parses an RSA key according to RFC 4253, section 6.6.
func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
E *big.Int
N *big.Int
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
if w.E.BitLen() > 24 {
return nil, nil, errors.New("ssh: exponent too large")
}
e := w.E.Int64()
if e < 3 || e&1 == 0 {
return nil, nil, errors.New("ssh: incorrect exponent")
}
var key rsa.PublicKey
key.E = int(e)
key.N = w.N
return (*rsaPublicKey)(&key), w.Rest, nil
}
func (r *rsaPublicKey) Marshal() []byte {
e := new(big.Int).SetInt64(int64(r.E))
// RSA publickey struct layout should match the struct used by
// parseRSACert in the x/crypto/ssh/agent package.
wirekey := struct {
Name string
E *big.Int
N *big.Int
}{
KeyAlgoRSA,
e,
r.N,
}
return Marshal(&wirekey)
}
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != r.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
h := crypto.SHA1.New()
h.Write(data)
digest := h.Sum(nil)
return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
}
func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return (*rsa.PublicKey)(r)
}
type dsaPublicKey dsa.PublicKey
func (r *dsaPublicKey) Type() string {
return "ssh-dss"
}
// parseDSA parses a DSA key according to RFC 4253, section 6.6.
func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
P, Q, G, Y *big.Int
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
key := &dsaPublicKey{
Parameters: dsa.Parameters{
P: w.P,
Q: w.Q,
G: w.G,
},
Y: w.Y,
}
return key, w.Rest, nil
}
func (k *dsaPublicKey) Marshal() []byte {
// DSA publickey struct layout should match the struct used by
// parseDSACert in the x/crypto/ssh/agent package.
w := struct {
Name string
P, Q, G, Y *big.Int
}{
k.Type(),
k.P,
k.Q,
k.G,
k.Y,
}
return Marshal(&w)
}
func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := crypto.SHA1.New()
h.Write(data)
digest := h.Sum(nil)
// Per RFC 4253, section 6.6,
// The value for 'dss_signature_blob' is encoded as a string containing
// r, followed by s (which are 160-bit integers, without lengths or
// padding, unsigned, and in network byte order).
// For DSS purposes, sig.Blob should be exactly 40 bytes in length.
if len(sig.Blob) != 40 {
return errors.New("ssh: DSA signature parse error")
}
r := new(big.Int).SetBytes(sig.Blob[:20])
s := new(big.Int).SetBytes(sig.Blob[20:])
if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) {
return nil
}
return errors.New("ssh: signature did not verify")
}
func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return (*dsa.PublicKey)(k)
}
type dsaPrivateKey struct {
*dsa.PrivateKey
}
func (k *dsaPrivateKey) PublicKey() PublicKey {
return (*dsaPublicKey)(&k.PrivateKey.PublicKey)
}
func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
h := crypto.SHA1.New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
if err != nil {
return nil, err
}
sig := make([]byte, 40)
rb := r.Bytes()
sb := s.Bytes()
copy(sig[20-len(rb):20], rb)
copy(sig[40-len(sb):], sb)
return &Signature{
Format: k.PublicKey().Type(),
Blob: sig,
}, nil
}
type ecdsaPublicKey ecdsa.PublicKey
func (key *ecdsaPublicKey) Type() string {
return "ecdsa-sha2-" + key.nistID()
}
func (key *ecdsaPublicKey) nistID() string {
switch key.Params().BitSize {
case 256:
return "nistp256"
case 384:
return "nistp384"
case 521:
return "nistp521"
}
panic("ssh: unsupported ecdsa key size")
}
type ed25519PublicKey ed25519.PublicKey
func (key ed25519PublicKey) Type() string {
return KeyAlgoED25519
}
func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
KeyBytes []byte
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
key := ed25519.PublicKey(w.KeyBytes)
return (ed25519PublicKey)(key), w.Rest, nil
}
func (key ed25519PublicKey) Marshal() []byte {
w := struct {
Name string
KeyBytes []byte
}{
KeyAlgoED25519,
[]byte(key),
}
return Marshal(&w)
}
func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error {
if sig.Format != key.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
}
edKey := (ed25519.PublicKey)(key)
if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
return errors.New("ssh: signature did not verify")
}
return nil
}
func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey {
return ed25519.PublicKey(k)
}
func supportedEllipticCurve(curve elliptic.Curve) bool {
return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
}
// ecHash returns the hash to match the given elliptic curve, see RFC
// 5656, section 6.2.1
func ecHash(curve elliptic.Curve) crypto.Hash {
bitSize := curve.Params().BitSize
switch {
case bitSize <= 256:
return crypto.SHA256
case bitSize <= 384:
return crypto.SHA384
}
return crypto.SHA512
}
// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
Curve string
KeyBytes []byte
Rest []byte `ssh:"rest"`
}
if err := Unmarshal(in, &w); err != nil {
return nil, nil, err
}
key := new(ecdsa.PublicKey)
switch w.Curve {
case "nistp256":
key.Curve = elliptic.P256()
case "nistp384":
key.Curve = elliptic.P384()
case "nistp521":
key.Curve = elliptic.P521()
default:
return nil, nil, errors.New("ssh: unsupported curve")
}
key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
if key.X == nil || key.Y == nil {
return nil, nil, errors.New("ssh: invalid curve point")
}
return (*ecdsaPublicKey)(key), w.Rest, nil
}
func (key *ecdsaPublicKey) Marshal() []byte {
// See RFC 5656, section 3.1.
keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
// ECDSA publickey struct layout should match the struct used by
// parseECDSACert in the x/crypto/ssh/agent package.
w := struct {
Name string
ID string
Key []byte
}{
key.Type(),
key.nistID(),
keyBytes,
}
return Marshal(&w)
}
func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != key.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
}
h := ecHash(key.Curve).New()
h.Write(data)
digest := h.Sum(nil)
// Per RFC 5656, section 3.1.2,
// The ecdsa_signature_blob value has the following specific encoding:
// mpint r
// mpint s
var ecSig struct {
R *big.Int
S *big.Int
}
if err := Unmarshal(sig.Blob, &ecSig); err != nil {
return err
}
if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
return nil
}
return errors.New("ssh: signature did not verify")
}
func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return (*ecdsa.PublicKey)(k)
}
// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding
// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
func NewSignerFromKey(key interface{}) (Signer, error) {
switch key := key.(type) {
case crypto.Signer:
return NewSignerFromSigner(key)
case *dsa.PrivateKey:
return &dsaPrivateKey{key}, nil
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
}
type wrappedSigner struct {
signer crypto.Signer
pubKey PublicKey
}
// NewSignerFromSigner takes any crypto.Signer implementation and
// returns a corresponding Signer interface. This can be used, for
// example, with keys kept in hardware modules.
func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
pubKey, err := NewPublicKey(signer.Public())
if err != nil {
return nil, err
}
return &wrappedSigner{signer, pubKey}, nil
}
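// Usage sketch (illustrative, not part of the vendored file): any
// crypto.Signer (an in-memory RSA key here, but the same applies to HSM- or
// agent-backed signers) can be adapted into an ssh.Signer and used to sign
// and verify data.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromSigner(key) // *rsa.PrivateKey implements crypto.Signer
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("data to authenticate")
	sig, err := signer.Sign(rand.Reader, data)
	if err != nil {
		log.Fatal(err)
	}
	// The public half verifies what the signer produced.
	if err := signer.PublicKey().Verify(data, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature format:", sig.Format) // ssh-rsa
}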
func (s *wrappedSigner) PublicKey() PublicKey {
return s.pubKey
}
func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
var hashFunc crypto.Hash
switch key := s.pubKey.(type) {
case *rsaPublicKey, *dsaPublicKey:
hashFunc = crypto.SHA1
case *ecdsaPublicKey:
hashFunc = ecHash(key.Curve)
case ed25519PublicKey:
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
h.Write(data)
digest = h.Sum(nil)
} else {
digest = data
}
signature, err := s.signer.Sign(rand, digest, hashFunc)
if err != nil {
return nil, err
}
// crypto.Signer.Sign is expected to return an ASN.1-encoded signature
// for ECDSA and DSA, but that's not the encoding expected by SSH, so
// re-encode.
switch s.pubKey.(type) {
case *ecdsaPublicKey, *dsaPublicKey:
type asn1Signature struct {
R, S *big.Int
}
asn1Sig := new(asn1Signature)
_, err := asn1.Unmarshal(signature, asn1Sig)
if err != nil {
return nil, err
}
switch s.pubKey.(type) {
case *ecdsaPublicKey:
signature = Marshal(asn1Sig)
case *dsaPublicKey:
signature = make([]byte, 40)
r := asn1Sig.R.Bytes()
s := asn1Sig.S.Bytes()
copy(signature[20-len(r):20], r)
copy(signature[40-len(s):40], s)
}
}
return &Signature{
Format: s.pubKey.Type(),
Blob: signature,
}, nil
}
// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey,
// or ed25519.PublicKey and returns a corresponding PublicKey instance.
// ECDSA keys must use P-256, P-384 or P-521.
func NewPublicKey(key interface{}) (PublicKey, error) {
switch key := key.(type) {
case *rsa.PublicKey:
return (*rsaPublicKey)(key), nil
case *ecdsa.PublicKey:
if !supportedEllipticCurve(key.Curve) {
return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.")
}
return (*ecdsaPublicKey)(key), nil
case *dsa.PublicKey:
return (*dsaPublicKey)(key), nil
case ed25519.PublicKey:
return (ed25519PublicKey)(key), nil
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
}
// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
// the same keys as ParseRawPrivateKey.
func ParsePrivateKey(pemBytes []byte) (Signer, error) {
key, err := ParseRawPrivateKey(pemBytes)
if err != nil {
return nil, err
}
return NewSignerFromKey(key)
}
// encryptedBlock tells whether a private key is
// encrypted by examining its Proc-Type header
// for a mention of ENCRYPTED
// according to RFC 1421 Section 4.6.1.1.
func encryptedBlock(block *pem.Block) bool {
return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
}
// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("ssh: no key found")
}
if encryptedBlock(block) {
return nil, errors.New("ssh: cannot decode encrypted private keys")
}
switch block.Type {
case "RSA PRIVATE KEY":
return x509.ParsePKCS1PrivateKey(block.Bytes)
case "EC PRIVATE KEY":
return x509.ParseECPrivateKey(block.Bytes)
case "DSA PRIVATE KEY":
return ParseDSAPrivateKey(block.Bytes)
case "OPENSSH PRIVATE KEY":
return parseOpenSSHPrivateKey(block.Bytes)
default:
return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
}
}
// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
// specified by the OpenSSL DSA man page.
func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
var k struct {
Version int
P *big.Int
Q *big.Int
G *big.Int
Pub *big.Int
Priv *big.Int
}
rest, err := asn1.Unmarshal(der, &k)
if err != nil {
return nil, errors.New("ssh: failed to parse DSA key: " + err.Error())
}
if len(rest) > 0 {
return nil, errors.New("ssh: garbage after DSA key")
}
return &dsa.PrivateKey{
PublicKey: dsa.PublicKey{
Parameters: dsa.Parameters{
P: k.P,
Q: k.Q,
G: k.G,
},
Y: k.Pub,
},
X: k.Priv,
}, nil
}
// Implemented based on the documentation at
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) {
magic := append([]byte("openssh-key-v1"), 0)
if len(key) < len(magic) || !bytes.Equal(magic, key[0:len(magic)]) {
return nil, errors.New("ssh: invalid openssh private key format")
}
remaining := key[len(magic):]
var w struct {
CipherName string
KdfName string
KdfOpts string
NumKeys uint32
PubKey []byte
PrivKeyBlock []byte
}
if err := Unmarshal(remaining, &w); err != nil {
return nil, err
}
pk1 := struct {
Check1 uint32
Check2 uint32
Keytype string
Pub []byte
Priv []byte
Comment string
Pad []byte `ssh:"rest"`
}{}
if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil {
return nil, err
}
if pk1.Check1 != pk1.Check2 {
return nil, errors.New("ssh: checkint mismatch")
}
// we only handle ed25519 keys currently
if pk1.Keytype != KeyAlgoED25519 {
return nil, errors.New("ssh: unhandled key type")
}
for i, b := range pk1.Pad {
if int(b) != i+1 {
return nil, errors.New("ssh: padding not as expected")
}
}
if len(pk1.Priv) != ed25519.PrivateKeySize {
return nil, errors.New("ssh: private key unexpected length")
}
pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize))
copy(pk, pk1.Priv)
return &pk, nil
}
// FingerprintLegacyMD5 returns the user presentation of the key's
// fingerprint as described by RFC 4716 section 4.
func FingerprintLegacyMD5(pubKey PublicKey) string {
md5sum := md5.Sum(pubKey.Marshal())
hexarray := make([]string, len(md5sum))
for i, c := range md5sum {
hexarray[i] = hex.EncodeToString([]byte{c})
}
return strings.Join(hexarray, ":")
}
// FingerprintSHA256 returns the user presentation of the key's
// fingerprint as an unpadded base64-encoded SHA-256 hash.
// This format was introduced in OpenSSH 6.8.
// https://www.openssh.com/txt/release-6.8
// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding)
func FingerprintSHA256(pubKey PublicKey) string {
sha256sum := sha256.Sum256(pubKey.Marshal())
hash := base64.RawStdEncoding.EncodeToString(sha256sum[:])
return "SHA256:" + hash
}
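A minimal usage sketch tying the key helpers above together (illustrative only, not part of the vendored file; the key path is a placeholder): parse an unencrypted PEM private key with ParsePrivateKey, then print both fingerprint formats of its public half.

package main

import (
    "fmt"
    "io/ioutil"
    "log"

    "golang.org/x/crypto/ssh"
)

func main() {
    // Placeholder path; any unencrypted RSA, DSA, ECDSA or ed25519 PEM key works.
    pemBytes, err := ioutil.ReadFile("/tmp/id_ed25519")
    if err != nil {
        log.Fatal(err)
    }
    // ParsePrivateKey decodes the PEM block and wraps the key via NewSignerFromKey.
    signer, err := ssh.ParsePrivateKey(pemBytes)
    if err != nil {
        log.Fatal(err)
    }
    pub := signer.PublicKey()
    fmt.Println(ssh.FingerprintSHA256(pub))    // "SHA256:..." (OpenSSH 6.8+ style)
    fmt.Println(ssh.FingerprintLegacyMD5(pub)) // colon-separated MD5 hex
}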

61
vendor/golang.org/x/crypto/ssh/mac.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
// Message authentication support
import (
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"hash"
)
type macMode struct {
keySize int
etm bool
new func(key []byte) hash.Hash
}
// truncatingMAC wraps around a hash.Hash and truncates the output digest to
// a given size.
type truncatingMAC struct {
length int
hmac hash.Hash
}
func (t truncatingMAC) Write(data []byte) (int, error) {
return t.hmac.Write(data)
}
func (t truncatingMAC) Sum(in []byte) []byte {
out := t.hmac.Sum(in)
return out[:len(in)+t.length]
}
func (t truncatingMAC) Reset() {
t.hmac.Reset()
}
func (t truncatingMAC) Size() int {
return t.length
}
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
var macModes = map[string]*macMode{
"hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
}},
"hmac-sha2-256": {32, false, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
}},
"hmac-sha1": {20, false, func(key []byte) hash.Hash {
return hmac.New(sha1.New, key)
}},
"hmac-sha1-96": {20, false, func(key []byte) hash.Hash {
return truncatingMAC{12, hmac.New(sha1.New, key)}
}},
}
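The macModes table above is what the negotiated MAC name is resolved against. A client-side sketch showing how a caller narrows that choice through ssh.Config.MACs (illustrative only; the host, credentials and the InsecureIgnoreHostKey callback are placeholders, and real code must verify host keys):

package main

import (
    "log"

    "golang.org/x/crypto/ssh"
)

func main() {
    config := &ssh.ClientConfig{
        User: "demo",
        Auth: []ssh.AuthMethod{ssh.Password("demo-password")},
        // Prefer the encrypt-then-MAC variant; names must match the macModes keys.
        Config: ssh.Config{
            MACs: []string{"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256"},
        },
        // Example only: never skip host key verification in real code.
        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
    }
    client, err := ssh.Dial("tcp", "example.com:22", config)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
}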

758
vendor/golang.org/x/crypto/ssh/messages.go generated vendored Normal file
View File

@ -0,0 +1,758 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"reflect"
"strconv"
"strings"
)
// These are SSH message type numbers. They are scattered around several
// documents but many were taken from [SSH-PARAMETERS].
const (
msgIgnore = 2
msgUnimplemented = 3
msgDebug = 4
msgNewKeys = 21
// Standard authentication messages
msgUserAuthSuccess = 52
msgUserAuthBanner = 53
)
// SSH messages:
//
// These structures mirror the wire format of the corresponding SSH messages.
// They are marshaled using reflection with the marshal and unmarshal functions
// in this file. The only wrinkle is that a final member of type []byte with a
// ssh tag of "rest" receives the remainder of a packet when unmarshaling.
// See RFC 4253, section 11.1.
const msgDisconnect = 1
// disconnectMsg is the message that signals a disconnect. It is also
// the error type returned from mux.Wait()
type disconnectMsg struct {
Reason uint32 `sshtype:"1"`
Message string
Language string
}
func (d *disconnectMsg) Error() string {
return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message)
}
// See RFC 4253, section 7.1.
const msgKexInit = 20
type kexInitMsg struct {
Cookie [16]byte `sshtype:"20"`
KexAlgos []string
ServerHostKeyAlgos []string
CiphersClientServer []string
CiphersServerClient []string
MACsClientServer []string
MACsServerClient []string
CompressionClientServer []string
CompressionServerClient []string
LanguagesClientServer []string
LanguagesServerClient []string
FirstKexFollows bool
Reserved uint32
}
// See RFC 4253, section 8.
// Diffie-Hellman
const msgKexDHInit = 30
type kexDHInitMsg struct {
X *big.Int `sshtype:"30"`
}
const msgKexECDHInit = 30
type kexECDHInitMsg struct {
ClientPubKey []byte `sshtype:"30"`
}
const msgKexECDHReply = 31
type kexECDHReplyMsg struct {
HostKey []byte `sshtype:"31"`
EphemeralPubKey []byte
Signature []byte
}
const msgKexDHReply = 31
type kexDHReplyMsg struct {
HostKey []byte `sshtype:"31"`
Y *big.Int
Signature []byte
}
// See RFC 4253, section 10.
const msgServiceRequest = 5
type serviceRequestMsg struct {
Service string `sshtype:"5"`
}
// See RFC 4253, section 10.
const msgServiceAccept = 6
type serviceAcceptMsg struct {
Service string `sshtype:"6"`
}
// See RFC 4252, section 5.
const msgUserAuthRequest = 50
type userAuthRequestMsg struct {
User string `sshtype:"50"`
Service string
Method string
Payload []byte `ssh:"rest"`
}
// Used for debug printouts of packets.
type userAuthSuccessMsg struct {
}
// See RFC 4252, section 5.1
const msgUserAuthFailure = 51
type userAuthFailureMsg struct {
Methods []string `sshtype:"51"`
PartialSuccess bool
}
// See RFC 4256, section 3.2
const msgUserAuthInfoRequest = 60
const msgUserAuthInfoResponse = 61
type userAuthInfoRequestMsg struct {
User string `sshtype:"60"`
Instruction string
DeprecatedLanguage string
NumPrompts uint32
Prompts []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
const msgChannelOpen = 90
type channelOpenMsg struct {
ChanType string `sshtype:"90"`
PeersId uint32
PeersWindow uint32
MaxPacketSize uint32
TypeSpecificData []byte `ssh:"rest"`
}
const msgChannelExtendedData = 95
const msgChannelData = 94
// Used for debug printouts of packets.
type channelDataMsg struct {
PeersId uint32 `sshtype:"94"`
Length uint32
Rest []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
const msgChannelOpenConfirm = 91
type channelOpenConfirmMsg struct {
PeersId uint32 `sshtype:"91"`
MyId uint32
MyWindow uint32
MaxPacketSize uint32
TypeSpecificData []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
const msgChannelOpenFailure = 92
type channelOpenFailureMsg struct {
PeersId uint32 `sshtype:"92"`
Reason RejectionReason
Message string
Language string
}
const msgChannelRequest = 98
type channelRequestMsg struct {
PeersId uint32 `sshtype:"98"`
Request string
WantReply bool
RequestSpecificData []byte `ssh:"rest"`
}
// See RFC 4254, section 5.4.
const msgChannelSuccess = 99
type channelRequestSuccessMsg struct {
PeersId uint32 `sshtype:"99"`
}
// See RFC 4254, section 5.4.
const msgChannelFailure = 100
type channelRequestFailureMsg struct {
PeersId uint32 `sshtype:"100"`
}
// See RFC 4254, section 5.3
const msgChannelClose = 97
type channelCloseMsg struct {
PeersId uint32 `sshtype:"97"`
}
// See RFC 4254, section 5.3
const msgChannelEOF = 96
type channelEOFMsg struct {
PeersId uint32 `sshtype:"96"`
}
// See RFC 4254, section 4
const msgGlobalRequest = 80
type globalRequestMsg struct {
Type string `sshtype:"80"`
WantReply bool
Data []byte `ssh:"rest"`
}
// See RFC 4254, section 4
const msgRequestSuccess = 81
type globalRequestSuccessMsg struct {
Data []byte `ssh:"rest" sshtype:"81"`
}
// See RFC 4254, section 4
const msgRequestFailure = 82
type globalRequestFailureMsg struct {
Data []byte `ssh:"rest" sshtype:"82"`
}
// See RFC 4254, section 5.2
const msgChannelWindowAdjust = 93
type windowAdjustMsg struct {
PeersId uint32 `sshtype:"93"`
AdditionalBytes uint32
}
// See RFC 4252, section 7
const msgUserAuthPubKeyOk = 60
type userAuthPubKeyOkMsg struct {
Algo string `sshtype:"60"`
PubKey []byte
}
// typeTags returns the possible type bytes for the given reflect.Type, which
// should be a struct. The possible values are separated by a '|' character.
func typeTags(structType reflect.Type) (tags []byte) {
tagStr := structType.Field(0).Tag.Get("sshtype")
for _, tag := range strings.Split(tagStr, "|") {
i, err := strconv.Atoi(tag)
if err == nil {
tags = append(tags, byte(i))
}
}
return tags
}
func fieldError(t reflect.Type, field int, problem string) error {
if problem != "" {
problem = ": " + problem
}
return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
}
var errShortRead = errors.New("ssh: short read")
// Unmarshal parses data in SSH wire format into a structure. The out
// argument should be a pointer to struct. If the first member of the
// struct has the "sshtype" tag set to a '|'-separated set of numbers
// in decimal, the packet must start with one of those numbers. In
// case of error, Unmarshal returns a ParseError or
// UnexpectedMessageError.
func Unmarshal(data []byte, out interface{}) error {
v := reflect.ValueOf(out).Elem()
structType := v.Type()
expectedTypes := typeTags(structType)
var expectedType byte
if len(expectedTypes) > 0 {
expectedType = expectedTypes[0]
}
if len(data) == 0 {
return parseError(expectedType)
}
if len(expectedTypes) > 0 {
goodType := false
for _, e := range expectedTypes {
if e > 0 && data[0] == e {
goodType = true
break
}
}
if !goodType {
return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes)
}
data = data[1:]
}
var ok bool
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
t := field.Type()
switch t.Kind() {
case reflect.Bool:
if len(data) < 1 {
return errShortRead
}
field.SetBool(data[0] != 0)
data = data[1:]
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
return fieldError(structType, i, "array of unsupported type")
}
if len(data) < t.Len() {
return errShortRead
}
for j, n := 0, t.Len(); j < n; j++ {
field.Index(j).Set(reflect.ValueOf(data[j]))
}
data = data[t.Len():]
case reflect.Uint64:
var u64 uint64
if u64, data, ok = parseUint64(data); !ok {
return errShortRead
}
field.SetUint(u64)
case reflect.Uint32:
var u32 uint32
if u32, data, ok = parseUint32(data); !ok {
return errShortRead
}
field.SetUint(uint64(u32))
case reflect.Uint8:
if len(data) < 1 {
return errShortRead
}
field.SetUint(uint64(data[0]))
data = data[1:]
case reflect.String:
var s []byte
if s, data, ok = parseString(data); !ok {
return fieldError(structType, i, "")
}
field.SetString(string(s))
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
if structType.Field(i).Tag.Get("ssh") == "rest" {
field.Set(reflect.ValueOf(data))
data = nil
} else {
var s []byte
if s, data, ok = parseString(data); !ok {
return errShortRead
}
field.Set(reflect.ValueOf(s))
}
case reflect.String:
var nl []string
if nl, data, ok = parseNameList(data); !ok {
return errShortRead
}
field.Set(reflect.ValueOf(nl))
default:
return fieldError(structType, i, "slice of unsupported type")
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
if n, data, ok = parseInt(data); !ok {
return errShortRead
}
field.Set(reflect.ValueOf(n))
} else {
return fieldError(structType, i, "pointer to unsupported type")
}
default:
return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t))
}
}
if len(data) != 0 {
return parseError(expectedType)
}
return nil
}
// Marshal serializes the message in msg to SSH wire format. The msg
// argument should be a struct or pointer to struct. If the first
// member has the "sshtype" tag set to a number in decimal, that
// number is prepended to the result. If the last member has the
// "ssh" tag set to "rest", its contents are appended to the output.
func Marshal(msg interface{}) []byte {
out := make([]byte, 0, 64)
return marshalStruct(out, msg)
}
func marshalStruct(out []byte, msg interface{}) []byte {
v := reflect.Indirect(reflect.ValueOf(msg))
msgTypes := typeTags(v.Type())
if len(msgTypes) > 0 {
out = append(out, msgTypes[0])
}
for i, n := 0, v.NumField(); i < n; i++ {
field := v.Field(i)
switch t := field.Type(); t.Kind() {
case reflect.Bool:
var v uint8
if field.Bool() {
v = 1
}
out = append(out, v)
case reflect.Array:
if t.Elem().Kind() != reflect.Uint8 {
panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface()))
}
for j, l := 0, t.Len(); j < l; j++ {
out = append(out, uint8(field.Index(j).Uint()))
}
case reflect.Uint32:
out = appendU32(out, uint32(field.Uint()))
case reflect.Uint64:
out = appendU64(out, uint64(field.Uint()))
case reflect.Uint8:
out = append(out, uint8(field.Uint()))
case reflect.String:
s := field.String()
out = appendInt(out, len(s))
out = append(out, s...)
case reflect.Slice:
switch t.Elem().Kind() {
case reflect.Uint8:
if v.Type().Field(i).Tag.Get("ssh") != "rest" {
out = appendInt(out, field.Len())
}
out = append(out, field.Bytes()...)
case reflect.String:
offset := len(out)
out = appendU32(out, 0)
if n := field.Len(); n > 0 {
for j := 0; j < n; j++ {
f := field.Index(j)
if j != 0 {
out = append(out, ',')
}
out = append(out, f.String()...)
}
// overwrite length value
binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4))
}
default:
panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface()))
}
case reflect.Ptr:
if t == bigIntType {
var n *big.Int
nValue := reflect.ValueOf(&n)
nValue.Elem().Set(field)
needed := intLength(n)
oldLength := len(out)
if cap(out)-len(out) < needed {
newOut := make([]byte, len(out), 2*(len(out)+needed))
copy(newOut, out)
out = newOut
}
out = out[:oldLength+needed]
marshalInt(out[oldLength:], n)
} else {
panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface()))
}
}
}
return out
}
var bigOne = big.NewInt(1)
func parseString(in []byte) (out, rest []byte, ok bool) {
if len(in) < 4 {
return
}
length := binary.BigEndian.Uint32(in)
in = in[4:]
if uint32(len(in)) < length {
return
}
out = in[:length]
rest = in[length:]
ok = true
return
}
var (
comma = []byte{','}
emptyNameList = []string{}
)
func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
if len(contents) == 0 {
out = emptyNameList
return
}
parts := bytes.Split(contents, comma)
out = make([]string, len(parts))
for i, part := range parts {
out[i] = string(part)
}
return
}
func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
contents, rest, ok := parseString(in)
if !ok {
return
}
out = new(big.Int)
if len(contents) > 0 && contents[0]&0x80 == 0x80 {
// This is a negative number
notBytes := make([]byte, len(contents))
for i := range notBytes {
notBytes[i] = ^contents[i]
}
out.SetBytes(notBytes)
out.Add(out, bigOne)
out.Neg(out)
} else {
// Positive number
out.SetBytes(contents)
}
ok = true
return
}
func parseUint32(in []byte) (uint32, []byte, bool) {
if len(in) < 4 {
return 0, nil, false
}
return binary.BigEndian.Uint32(in), in[4:], true
}
func parseUint64(in []byte) (uint64, []byte, bool) {
if len(in) < 8 {
return 0, nil, false
}
return binary.BigEndian.Uint64(in), in[8:], true
}
func intLength(n *big.Int) int {
length := 4 /* length bytes */
if n.Sign() < 0 {
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bitLen := nMinus1.BitLen()
if bitLen%8 == 0 {
// The number will need 0xff padding
length++
}
length += (bitLen + 7) / 8
} else if n.Sign() == 0 {
// A zero is the zero length string
} else {
bitLen := n.BitLen()
if bitLen%8 == 0 {
// The number will need 0x00 padding
length++
}
length += (bitLen + 7) / 8
}
return length
}
func marshalUint32(to []byte, n uint32) []byte {
binary.BigEndian.PutUint32(to, n)
return to[4:]
}
func marshalUint64(to []byte, n uint64) []byte {
binary.BigEndian.PutUint64(to, n)
return to[8:]
}
func marshalInt(to []byte, n *big.Int) []byte {
lengthBytes := to
to = to[4:]
length := 0
if n.Sign() < 0 {
// A negative number has to be converted to two's-complement
// form. So we'll subtract 1 and invert. If the
// most-significant-bit isn't set then we'll need to pad the
// beginning with 0xff in order to keep the number negative.
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bytes := nMinus1.Bytes()
for i := range bytes {
bytes[i] ^= 0xff
}
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
to[0] = 0xff
to = to[1:]
length++
}
nBytes := copy(to, bytes)
to = to[nBytes:]
length += nBytes
} else if n.Sign() == 0 {
// A zero is the zero length string
} else {
bytes := n.Bytes()
if len(bytes) > 0 && bytes[0]&0x80 != 0 {
// We'll have to pad this with a 0x00 in order to
// stop it looking like a negative number.
to[0] = 0
to = to[1:]
length++
}
nBytes := copy(to, bytes)
to = to[nBytes:]
length += nBytes
}
lengthBytes[0] = byte(length >> 24)
lengthBytes[1] = byte(length >> 16)
lengthBytes[2] = byte(length >> 8)
lengthBytes[3] = byte(length)
return to
}
func writeInt(w io.Writer, n *big.Int) {
length := intLength(n)
buf := make([]byte, length)
marshalInt(buf, n)
w.Write(buf)
}
func writeString(w io.Writer, s []byte) {
var lengthBytes [4]byte
lengthBytes[0] = byte(len(s) >> 24)
lengthBytes[1] = byte(len(s) >> 16)
lengthBytes[2] = byte(len(s) >> 8)
lengthBytes[3] = byte(len(s))
w.Write(lengthBytes[:])
w.Write(s)
}
func stringLength(n int) int {
return 4 + n
}
func marshalString(to []byte, s []byte) []byte {
to[0] = byte(len(s) >> 24)
to[1] = byte(len(s) >> 16)
to[2] = byte(len(s) >> 8)
to[3] = byte(len(s))
to = to[4:]
copy(to, s)
return to[len(s):]
}
var bigIntType = reflect.TypeOf((*big.Int)(nil))
// Decode a packet into its corresponding message.
func decode(packet []byte) (interface{}, error) {
var msg interface{}
switch packet[0] {
case msgDisconnect:
msg = new(disconnectMsg)
case msgServiceRequest:
msg = new(serviceRequestMsg)
case msgServiceAccept:
msg = new(serviceAcceptMsg)
case msgKexInit:
msg = new(kexInitMsg)
case msgKexDHInit:
msg = new(kexDHInitMsg)
case msgKexDHReply:
msg = new(kexDHReplyMsg)
case msgUserAuthRequest:
msg = new(userAuthRequestMsg)
case msgUserAuthSuccess:
return new(userAuthSuccessMsg), nil
case msgUserAuthFailure:
msg = new(userAuthFailureMsg)
case msgUserAuthPubKeyOk:
msg = new(userAuthPubKeyOkMsg)
case msgGlobalRequest:
msg = new(globalRequestMsg)
case msgRequestSuccess:
msg = new(globalRequestSuccessMsg)
case msgRequestFailure:
msg = new(globalRequestFailureMsg)
case msgChannelOpen:
msg = new(channelOpenMsg)
case msgChannelData:
msg = new(channelDataMsg)
case msgChannelOpenConfirm:
msg = new(channelOpenConfirmMsg)
case msgChannelOpenFailure:
msg = new(channelOpenFailureMsg)
case msgChannelWindowAdjust:
msg = new(windowAdjustMsg)
case msgChannelEOF:
msg = new(channelEOFMsg)
case msgChannelClose:
msg = new(channelCloseMsg)
case msgChannelRequest:
msg = new(channelRequestMsg)
case msgChannelSuccess:
msg = new(channelRequestSuccessMsg)
case msgChannelFailure:
msg = new(channelRequestFailureMsg)
default:
return nil, unexpectedMessageError(0, packet[0])
}
if err := Unmarshal(packet, msg); err != nil {
return nil, err
}
return msg, nil
}
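Marshal and Unmarshal are exported, so callers can apply the same wire rules to their own payloads. A small round-trip sketch (exitStatusMsg is defined here purely for illustration; it mirrors the RFC 4254 "exit-status" payload and is not part of the package):

package main

import (
    "fmt"
    "log"

    "golang.org/x/crypto/ssh"
)

// exitStatusMsg mirrors the RFC 4254 "exit-status" request payload.
type exitStatusMsg struct {
    Status uint32
}

func main() {
    // Marshal emits a 4-byte big-endian uint32; no sshtype tag, so no leading type byte.
    wire := ssh.Marshal(&exitStatusMsg{Status: 127})

    var decoded exitStatusMsg
    if err := ssh.Unmarshal(wire, &decoded); err != nil {
        log.Fatal(err)
    }
    fmt.Println(decoded.Status) // 127
}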

330
vendor/golang.org/x/crypto/ssh/mux.go generated vendored Normal file
View File

@ -0,0 +1,330 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"encoding/binary"
"fmt"
"io"
"log"
"sync"
"sync/atomic"
)
// debugMux, if set, causes messages in the connection protocol to be
// logged.
const debugMux = false
// chanList is a thread-safe channel list.
type chanList struct {
// protects concurrent access to chans
sync.Mutex
// chans are indexed by the local id of the channel, which the
// other side should send in the PeersId field.
chans []*channel
// This is a debugging aid: it offsets all IDs by this
// amount. This helps distinguish otherwise identical
// server/client muxes
offset uint32
}
// add assigns a channel ID to the given channel.
func (c *chanList) add(ch *channel) uint32 {
c.Lock()
defer c.Unlock()
for i := range c.chans {
if c.chans[i] == nil {
c.chans[i] = ch
return uint32(i) + c.offset
}
}
c.chans = append(c.chans, ch)
return uint32(len(c.chans)-1) + c.offset
}
// getChan returns the channel for the given ID.
func (c *chanList) getChan(id uint32) *channel {
id -= c.offset
c.Lock()
defer c.Unlock()
if id < uint32(len(c.chans)) {
return c.chans[id]
}
return nil
}
func (c *chanList) remove(id uint32) {
id -= c.offset
c.Lock()
if id < uint32(len(c.chans)) {
c.chans[id] = nil
}
c.Unlock()
}
// dropAll forgets all channels it knows, returning them in a slice.
func (c *chanList) dropAll() []*channel {
c.Lock()
defer c.Unlock()
var r []*channel
for _, ch := range c.chans {
if ch == nil {
continue
}
r = append(r, ch)
}
c.chans = nil
return r
}
// mux represents the state for the SSH connection protocol, which
// multiplexes many channels onto a single packet transport.
type mux struct {
conn packetConn
chanList chanList
incomingChannels chan NewChannel
globalSentMu sync.Mutex
globalResponses chan interface{}
incomingRequests chan *Request
errCond *sync.Cond
err error
}
// When debugging, each new chanList instantiation has a different
// offset.
var globalOff uint32
func (m *mux) Wait() error {
m.errCond.L.Lock()
defer m.errCond.L.Unlock()
for m.err == nil {
m.errCond.Wait()
}
return m.err
}
// newMux returns a mux that runs over the given connection.
func newMux(p packetConn) *mux {
m := &mux{
conn: p,
incomingChannels: make(chan NewChannel, chanSize),
globalResponses: make(chan interface{}, 1),
incomingRequests: make(chan *Request, chanSize),
errCond: newCond(),
}
if debugMux {
m.chanList.offset = atomic.AddUint32(&globalOff, 1)
}
go m.loop()
return m
}
func (m *mux) sendMessage(msg interface{}) error {
p := Marshal(msg)
if debugMux {
log.Printf("send global(%d): %#v", m.chanList.offset, msg)
}
return m.conn.writePacket(p)
}
func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
if wantReply {
m.globalSentMu.Lock()
defer m.globalSentMu.Unlock()
}
if err := m.sendMessage(globalRequestMsg{
Type: name,
WantReply: wantReply,
Data: payload,
}); err != nil {
return false, nil, err
}
if !wantReply {
return false, nil, nil
}
msg, ok := <-m.globalResponses
if !ok {
return false, nil, io.EOF
}
switch msg := msg.(type) {
case *globalRequestFailureMsg:
return false, msg.Data, nil
case *globalRequestSuccessMsg:
return true, msg.Data, nil
default:
return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
}
}
// ackRequest must be called after processing a global request that
// has WantReply set.
func (m *mux) ackRequest(ok bool, data []byte) error {
if ok {
return m.sendMessage(globalRequestSuccessMsg{Data: data})
}
return m.sendMessage(globalRequestFailureMsg{Data: data})
}
func (m *mux) Close() error {
return m.conn.Close()
}
// loop runs the connection machine. It will process packets until an
// error is encountered. To synchronize on loop exit, use mux.Wait.
func (m *mux) loop() {
var err error
for err == nil {
err = m.onePacket()
}
for _, ch := range m.chanList.dropAll() {
ch.close()
}
close(m.incomingChannels)
close(m.incomingRequests)
close(m.globalResponses)
m.conn.Close()
m.errCond.L.Lock()
m.err = err
m.errCond.Broadcast()
m.errCond.L.Unlock()
if debugMux {
log.Println("loop exit", err)
}
}
// onePacket reads and processes one packet.
func (m *mux) onePacket() error {
packet, err := m.conn.readPacket()
if err != nil {
return err
}
if debugMux {
if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
} else {
p, _ := decode(packet)
log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
}
}
switch packet[0] {
case msgChannelOpen:
return m.handleChannelOpen(packet)
case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
return m.handleGlobalPacket(packet)
}
// assume a channel packet.
if len(packet) < 5 {
return parseError(packet[0])
}
id := binary.BigEndian.Uint32(packet[1:])
ch := m.chanList.getChan(id)
if ch == nil {
return fmt.Errorf("ssh: invalid channel %d", id)
}
return ch.handlePacket(packet)
}
func (m *mux) handleGlobalPacket(packet []byte) error {
msg, err := decode(packet)
if err != nil {
return err
}
switch msg := msg.(type) {
case *globalRequestMsg:
m.incomingRequests <- &Request{
Type: msg.Type,
WantReply: msg.WantReply,
Payload: msg.Data,
mux: m,
}
case *globalRequestSuccessMsg, *globalRequestFailureMsg:
m.globalResponses <- msg
default:
panic(fmt.Sprintf("not a global message %#v", msg))
}
return nil
}
// handleChannelOpen schedules a channel to be Accept()ed.
func (m *mux) handleChannelOpen(packet []byte) error {
var msg channelOpenMsg
if err := Unmarshal(packet, &msg); err != nil {
return err
}
if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
failMsg := channelOpenFailureMsg{
PeersId: msg.PeersId,
Reason: ConnectionFailed,
Message: "invalid request",
Language: "en_US.UTF-8",
}
return m.sendMessage(failMsg)
}
c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
c.remoteId = msg.PeersId
c.maxRemotePayload = msg.MaxPacketSize
c.remoteWin.add(msg.PeersWindow)
m.incomingChannels <- c
return nil
}
func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
ch, err := m.openChannel(chanType, extra)
if err != nil {
return nil, nil, err
}
return ch, ch.incomingRequests, nil
}
func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
ch := m.newChannel(chanType, channelOutbound, extra)
ch.maxIncomingPayload = channelMaxPacket
open := channelOpenMsg{
ChanType: chanType,
PeersWindow: ch.myWindow,
MaxPacketSize: ch.maxIncomingPayload,
TypeSpecificData: extra,
PeersId: ch.localId,
}
if err := m.sendMessage(open); err != nil {
return nil, err
}
switch msg := (<-ch.msg).(type) {
case *channelOpenConfirmMsg:
return ch, nil
case *channelOpenFailureMsg:
return nil, &OpenChannelError{msg.Reason, msg.Message}
default:
return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
}
}
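mux.SendRequest is reached by callers through Conn.SendRequest. A sketch of the common keepalive pattern built on it (assumes client is an *ssh.Client obtained elsewhere, e.g. via ssh.Dial; the request name follows the OpenSSH convention and is not special-cased by this package):

package sshutil

import (
    "time"

    "golang.org/x/crypto/ssh"
)

// keepAlive sends periodic global requests so a dead peer is noticed.
func keepAlive(client *ssh.Client, interval time.Duration, done <-chan struct{}) error {
    t := time.NewTicker(interval)
    defer t.Stop()
    for {
        select {
        case <-t.C:
            // wantReply=true forces a round trip through the mux's globalResponses channel.
            if _, _, err := client.SendRequest("keepalive@openssh.com", true, nil); err != nil {
                return err
            }
        case <-done:
            return nil
        }
    }
}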

491
vendor/golang.org/x/crypto/ssh/server.go generated vendored Normal file
View File

@ -0,0 +1,491 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"errors"
"fmt"
"io"
"net"
"strings"
)
// The Permissions type holds fine-grained permissions that are
// specific to a user or a specific authentication method for a
// user. Permissions, except for "source-address", must be enforced in
// the server application layer, after successful authentication. The
// Permissions are passed on in ServerConn so a server implementation
// can honor them.
type Permissions struct {
// Critical options restrict default permissions. Common
// restrictions are "source-address" and "force-command". If
// the server cannot enforce the restriction, or does not
// recognize it, the user should not authenticate.
CriticalOptions map[string]string
// Extensions are extra functionality that the server may
// offer on authenticated connections. Common extensions are
// "permit-agent-forwarding", "permit-X11-forwarding". Lack of
// support for an extension does not preclude authenticating a
// user.
Extensions map[string]string
}
// ServerConfig holds server specific configuration data.
type ServerConfig struct {
// Config contains configuration shared between client and server.
Config
hostKeys []Signer
// NoClientAuth is true if clients are allowed to connect without
// authenticating.
NoClientAuth bool
// PasswordCallback, if non-nil, is called when a user
// attempts to authenticate using a password.
PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
// PublicKeyCallback, if non-nil, is called when a client attempts public
// key authentication. It must return true if the given public key is
// valid for the given user. For example, see CertChecker.Authenticate.
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
// KeyboardInteractiveCallback, if non-nil, is called when
// keyboard-interactive authentication is selected (RFC
// 4256). The client object's Challenge function should be
// used to query the user. The callback may offer multiple
// Challenge rounds. To avoid information leaks, the client
// should be presented a challenge even if the user is
// unknown.
KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
// AuthLogCallback, if non-nil, is called to log all authentication
// attempts.
AuthLogCallback func(conn ConnMetadata, method string, err error)
// ServerVersion is the version identification string to announce in
// the public handshake.
// If empty, a reasonable default is used.
// Note that RFC 4253 section 4.2 requires that this string start with
// "SSH-2.0-".
ServerVersion string
}
// AddHostKey adds a private key as a host key. If an existing host
// key exists with the same algorithm, it is overwritten. Each server
// config must have at least one host key.
func (s *ServerConfig) AddHostKey(key Signer) {
for i, k := range s.hostKeys {
if k.PublicKey().Type() == key.PublicKey().Type() {
s.hostKeys[i] = key
return
}
}
s.hostKeys = append(s.hostKeys, key)
}
// cachedPubKey contains the results of querying whether a public key is
// acceptable for a user.
type cachedPubKey struct {
user string
pubKeyData []byte
result error
perms *Permissions
}
const maxCachedPubKeys = 16
// pubKeyCache caches tests for public keys. Since SSH clients
// will query whether a public key is acceptable before attempting to
// authenticate with it, we end up with duplicate queries for public
// key validity. The cache only applies to a single ServerConn.
type pubKeyCache struct {
keys []cachedPubKey
}
// get returns the result for a given user/algo/key tuple.
func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
for _, k := range c.keys {
if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) {
return k, true
}
}
return cachedPubKey{}, false
}
// add adds the given tuple to the cache.
func (c *pubKeyCache) add(candidate cachedPubKey) {
if len(c.keys) < maxCachedPubKeys {
c.keys = append(c.keys, candidate)
}
}
// ServerConn is an authenticated SSH connection, as seen from the
// server
type ServerConn struct {
Conn
// If the authentication callback that succeeded returned a
// non-nil Permissions pointer, it is stored here.
Permissions *Permissions
}
// NewServerConn starts a new SSH server with c as the underlying
// transport. It starts with a handshake and, if the handshake is
// unsuccessful, it closes the connection and returns an error. The
// Request and NewChannel channels must be serviced, or the connection
// will hang.
func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
fullConf := *config
fullConf.SetDefaults()
s := &connection{
sshConn: sshConn{conn: c},
}
perms, err := s.serverHandshake(&fullConf)
if err != nil {
c.Close()
return nil, nil, nil, err
}
return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil
}
// signAndMarshal signs the data with the appropriate algorithm,
// and serializes the result in SSH wire format.
func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
sig, err := k.Sign(rand, data)
if err != nil {
return nil, err
}
return Marshal(sig), nil
}
// serverHandshake performs key exchange and user authentication.
func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
if len(config.hostKeys) == 0 {
return nil, errors.New("ssh: server has no host keys")
}
if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil {
return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
}
if config.ServerVersion != "" {
s.serverVersion = []byte(config.ServerVersion)
} else {
s.serverVersion = []byte(packageVersion)
}
var err error
s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
if err != nil {
return nil, err
}
tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
if err := s.transport.waitSession(); err != nil {
return nil, err
}
// We just did the key change, so the session ID is established.
s.sessionID = s.transport.getSessionID()
var packet []byte
if packet, err = s.transport.readPacket(); err != nil {
return nil, err
}
var serviceRequest serviceRequestMsg
if err = Unmarshal(packet, &serviceRequest); err != nil {
return nil, err
}
if serviceRequest.Service != serviceUserAuth {
return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
}
serviceAccept := serviceAcceptMsg{
Service: serviceUserAuth,
}
if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
return nil, err
}
perms, err := s.serverAuthenticate(config)
if err != nil {
return nil, err
}
s.mux = newMux(s.transport)
return perms, err
}
func isAcceptableAlgo(algo string) bool {
switch algo {
case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
return true
}
return false
}
func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
if addr == nil {
return errors.New("ssh: no address known for client, but source-address match required")
}
tcpAddr, ok := addr.(*net.TCPAddr)
if !ok {
return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr)
}
for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
if allowedIP.Equal(tcpAddr.IP) {
return nil
}
} else {
_, ipNet, err := net.ParseCIDR(sourceAddr)
if err != nil {
return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
}
if ipNet.Contains(tcpAddr.IP) {
return nil
}
}
}
return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
}
func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
sessionID := s.transport.getSessionID()
var cache pubKeyCache
var perms *Permissions
userAuthLoop:
for {
var userAuthReq userAuthRequestMsg
if packet, err := s.transport.readPacket(); err != nil {
return nil, err
} else if err = Unmarshal(packet, &userAuthReq); err != nil {
return nil, err
}
if userAuthReq.Service != serviceSSH {
return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
}
s.user = userAuthReq.User
perms = nil
authErr := errors.New("no auth passed yet")
switch userAuthReq.Method {
case "none":
if config.NoClientAuth {
authErr = nil
}
case "password":
if config.PasswordCallback == nil {
authErr = errors.New("ssh: password auth not configured")
break
}
payload := userAuthReq.Payload
if len(payload) < 1 || payload[0] != 0 {
return nil, parseError(msgUserAuthRequest)
}
payload = payload[1:]
password, payload, ok := parseString(payload)
if !ok || len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}
perms, authErr = config.PasswordCallback(s, password)
case "keyboard-interactive":
if config.KeyboardInteractiveCallback == nil {
authErr = errors.New("ssh: keyboard-interactive auth not configubred")
break
}
prompter := &sshClientKeyboardInteractive{s}
perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
case "publickey":
if config.PublicKeyCallback == nil {
authErr = errors.New("ssh: publickey auth not configured")
break
}
payload := userAuthReq.Payload
if len(payload) < 1 {
return nil, parseError(msgUserAuthRequest)
}
isQuery := payload[0] == 0
payload = payload[1:]
algoBytes, payload, ok := parseString(payload)
if !ok {
return nil, parseError(msgUserAuthRequest)
}
algo := string(algoBytes)
if !isAcceptableAlgo(algo) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
break
}
pubKeyData, payload, ok := parseString(payload)
if !ok {
return nil, parseError(msgUserAuthRequest)
}
pubKey, err := ParsePublicKey(pubKeyData)
if err != nil {
return nil, err
}
candidate, ok := cache.get(s.user, pubKeyData)
if !ok {
candidate.user = s.user
candidate.pubKeyData = pubKeyData
candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
candidate.result = checkSourceAddress(
s.RemoteAddr(),
candidate.perms.CriticalOptions[sourceAddressCriticalOption])
}
cache.add(candidate)
}
if isQuery {
// The client can query if the given public key
// would be okay.
if len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}
if candidate.result == nil {
okMsg := userAuthPubKeyOkMsg{
Algo: algo,
PubKey: pubKeyData,
}
if err = s.transport.writePacket(Marshal(&okMsg)); err != nil {
return nil, err
}
continue userAuthLoop
}
authErr = candidate.result
} else {
sig, payload, ok := parseSignature(payload)
if !ok || len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}
// Ensure the public key algo and signature algo
// are supported. Compare the private key
// algorithm name that corresponds to algo with
// sig.Format. This is usually the same, but
// for certs, the names differ.
if !isAcceptableAlgo(sig.Format) {
break
}
signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
if err := pubKey.Verify(signedData, sig); err != nil {
return nil, err
}
authErr = candidate.result
perms = candidate.perms
}
default:
authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
}
if config.AuthLogCallback != nil {
config.AuthLogCallback(s, userAuthReq.Method, authErr)
}
if authErr == nil {
break userAuthLoop
}
var failureMsg userAuthFailureMsg
if config.PasswordCallback != nil {
failureMsg.Methods = append(failureMsg.Methods, "password")
}
if config.PublicKeyCallback != nil {
failureMsg.Methods = append(failureMsg.Methods, "publickey")
}
if config.KeyboardInteractiveCallback != nil {
failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
}
if len(failureMsg.Methods) == 0 {
return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
}
if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil {
return nil, err
}
}
if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
return nil, err
}
return perms, nil
}
// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by
// asking the client on the other side of a ServerConn.
type sshClientKeyboardInteractive struct {
*connection
}
func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
if len(questions) != len(echos) {
return nil, errors.New("ssh: echos and questions must have equal length")
}
var prompts []byte
for i := range questions {
prompts = appendString(prompts, questions[i])
prompts = appendBool(prompts, echos[i])
}
if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
Instruction: instruction,
NumPrompts: uint32(len(questions)),
Prompts: prompts,
})); err != nil {
return nil, err
}
packet, err := c.transport.readPacket()
if err != nil {
return nil, err
}
if packet[0] != msgUserAuthInfoResponse {
return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0])
}
packet = packet[1:]
n, packet, ok := parseUint32(packet)
if !ok || int(n) != len(questions) {
return nil, parseError(msgUserAuthInfoResponse)
}
for i := uint32(0); i < n; i++ {
ans, rest, ok := parseString(packet)
if !ok {
return nil, parseError(msgUserAuthInfoResponse)
}
answers = append(answers, string(ans))
packet = rest
}
if len(packet) != 0 {
return nil, errors.New("ssh: junk at end of message")
}
return answers, nil
}
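A minimal server sketch wiring the pieces above together: at least one host key via AddHostKey, an authentication callback, and servicing of the channels and requests returned by NewServerConn (illustrative only; the credentials, host key path and listen address are placeholders):

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net"

    "golang.org/x/crypto/ssh"
)

func main() {
    config := &ssh.ServerConfig{
        // Placeholder credentials; a real server would consult a user database.
        PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
            if c.User() == "demo" && string(pass) == "demo-password" {
                return nil, nil
            }
            return nil, fmt.Errorf("password rejected for %q", c.User())
        },
    }

    // serverHandshake above requires at least one host key.
    keyBytes, err := ioutil.ReadFile("/etc/ssh/ssh_host_ed25519_key") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    hostKey, err := ssh.ParsePrivateKey(keyBytes)
    if err != nil {
        log.Fatal(err)
    }
    config.AddHostKey(hostKey)

    ln, err := net.Listen("tcp", "127.0.0.1:2022")
    if err != nil {
        log.Fatal(err)
    }
    for {
        nConn, err := ln.Accept()
        if err != nil {
            log.Fatal(err)
        }
        go func(c net.Conn) {
            // The returned channels must be serviced or the connection hangs.
            conn, chans, reqs, err := ssh.NewServerConn(c, config)
            if err != nil {
                log.Println("handshake failed:", err)
                return
            }
            defer conn.Close()
            go ssh.DiscardRequests(reqs)
            for newChan := range chans {
                newChan.Reject(ssh.UnknownChannelType, "channels not implemented in this sketch")
            }
        }(nConn)
    }
}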

627
vendor/golang.org/x/crypto/ssh/session.go generated vendored Normal file
View File

@ -0,0 +1,627 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
// Session implements an interactive session described in
// "RFC 4254, section 6".
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"sync"
)
type Signal string
// POSIX signals as listed in RFC 4254 Section 6.10.
const (
SIGABRT Signal = "ABRT"
SIGALRM Signal = "ALRM"
SIGFPE Signal = "FPE"
SIGHUP Signal = "HUP"
SIGILL Signal = "ILL"
SIGINT Signal = "INT"
SIGKILL Signal = "KILL"
SIGPIPE Signal = "PIPE"
SIGQUIT Signal = "QUIT"
SIGSEGV Signal = "SEGV"
SIGTERM Signal = "TERM"
SIGUSR1 Signal = "USR1"
SIGUSR2 Signal = "USR2"
)
var signals = map[Signal]int{
SIGABRT: 6,
SIGALRM: 14,
SIGFPE: 8,
SIGHUP: 1,
SIGILL: 4,
SIGINT: 2,
SIGKILL: 9,
SIGPIPE: 13,
SIGQUIT: 3,
SIGSEGV: 11,
SIGTERM: 15,
}
type TerminalModes map[uint8]uint32
// POSIX terminal mode flags as listed in RFC 4254 Section 8.
const (
tty_OP_END = 0
VINTR = 1
VQUIT = 2
VERASE = 3
VKILL = 4
VEOF = 5
VEOL = 6
VEOL2 = 7
VSTART = 8
VSTOP = 9
VSUSP = 10
VDSUSP = 11
VREPRINT = 12
VWERASE = 13
VLNEXT = 14
VFLUSH = 15
VSWTCH = 16
VSTATUS = 17
VDISCARD = 18
IGNPAR = 30
PARMRK = 31
INPCK = 32
ISTRIP = 33
INLCR = 34
IGNCR = 35
ICRNL = 36
IUCLC = 37
IXON = 38
IXANY = 39
IXOFF = 40
IMAXBEL = 41
ISIG = 50
ICANON = 51
XCASE = 52
ECHO = 53
ECHOE = 54
ECHOK = 55
ECHONL = 56
NOFLSH = 57
TOSTOP = 58
IEXTEN = 59
ECHOCTL = 60
ECHOKE = 61
PENDIN = 62
OPOST = 70
OLCUC = 71
ONLCR = 72
OCRNL = 73
ONOCR = 74
ONLRET = 75
CS7 = 90
CS8 = 91
PARENB = 92
PARODD = 93
TTY_OP_ISPEED = 128
TTY_OP_OSPEED = 129
)
// A Session represents a connection to a remote command or shell.
type Session struct {
// Stdin specifies the remote process's standard input.
// If Stdin is nil, the remote process reads from an empty
// bytes.Buffer.
Stdin io.Reader
// Stdout and Stderr specify the remote process's standard
// output and error.
//
// If either is nil, Run connects the corresponding file
// descriptor to an instance of ioutil.Discard. There is a
// fixed amount of buffering that is shared for the two streams.
// If either blocks it may eventually cause the remote
// command to block.
Stdout io.Writer
Stderr io.Writer
ch Channel // the channel backing this session
started bool // true once Start, Run or Shell is invoked.
copyFuncs []func() error
errors chan error // one send per copyFunc
// true if pipe method is active
stdinpipe, stdoutpipe, stderrpipe bool
// stdinPipeWriter is non-nil if StdinPipe has not been called
// and Stdin was specified by the user; it is the write end of
// a pipe connecting Session.Stdin to the stdin channel.
stdinPipeWriter io.WriteCloser
exitStatus chan error
}
// SendRequest sends an out-of-band channel request on the SSH channel
// underlying the session.
func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
return s.ch.SendRequest(name, wantReply, payload)
}
func (s *Session) Close() error {
return s.ch.Close()
}
// RFC 4254 Section 6.4.
type setenvRequest struct {
Name string
Value string
}
// Setenv sets an environment variable that will be applied to any
// command executed by Shell or Run.
func (s *Session) Setenv(name, value string) error {
msg := setenvRequest{
Name: name,
Value: value,
}
ok, err := s.ch.SendRequest("env", true, Marshal(&msg))
if err == nil && !ok {
err = errors.New("ssh: setenv failed")
}
return err
}
// RFC 4254 Section 6.2.
type ptyRequestMsg struct {
Term string
Columns uint32
Rows uint32
Width uint32
Height uint32
Modelist string
}
// RequestPty requests the association of a pty with the session on the remote host.
func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error {
var tm []byte
for k, v := range termmodes {
kv := struct {
Key byte
Val uint32
}{k, v}
tm = append(tm, Marshal(&kv)...)
}
tm = append(tm, tty_OP_END)
req := ptyRequestMsg{
Term: term,
Columns: uint32(w),
Rows: uint32(h),
Width: uint32(w * 8),
Height: uint32(h * 8),
Modelist: string(tm),
}
ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req))
if err == nil && !ok {
err = errors.New("ssh: pty-req failed")
}
return err
}
// RFC 4254 Section 6.5.
type subsystemRequestMsg struct {
Subsystem string
}
// RequestSubsystem requests the association of a subsystem with the session on the remote host.
// A subsystem is a predefined command that runs in the background when the SSH session is initiated.
func (s *Session) RequestSubsystem(subsystem string) error {
msg := subsystemRequestMsg{
Subsystem: subsystem,
}
ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg))
if err == nil && !ok {
err = errors.New("ssh: subsystem request failed")
}
return err
}
// RFC 4254 Section 6.9.
type signalMsg struct {
Signal string
}
// Signal sends the given signal to the remote process.
// sig is one of the SIG* constants.
func (s *Session) Signal(sig Signal) error {
msg := signalMsg{
Signal: string(sig),
}
_, err := s.ch.SendRequest("signal", false, Marshal(&msg))
return err
}
// RFC 4254 Section 6.5.
type execMsg struct {
Command string
}
// Start runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start or Shell.
func (s *Session) Start(cmd string) error {
if s.started {
return errors.New("ssh: session already started")
}
req := execMsg{
Command: cmd,
}
ok, err := s.ch.SendRequest("exec", true, Marshal(&req))
if err == nil && !ok {
err = fmt.Errorf("ssh: command %v failed", cmd)
}
if err != nil {
return err
}
return s.start()
}
// Run runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start, Shell, Output,
// or CombinedOutput.
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
// If the remote server does not send an exit status, an error of type
// *ExitMissingError is returned. If the command completes
// unsuccessfully or is interrupted by a signal, the error is of type
// *ExitError. Other error types may be returned for I/O problems.
func (s *Session) Run(cmd string) error {
err := s.Start(cmd)
if err != nil {
return err
}
return s.Wait()
}
// Output runs cmd on the remote host and returns its standard output.
func (s *Session) Output(cmd string) ([]byte, error) {
if s.Stdout != nil {
return nil, errors.New("ssh: Stdout already set")
}
var b bytes.Buffer
s.Stdout = &b
err := s.Run(cmd)
return b.Bytes(), err
}
type singleWriter struct {
b bytes.Buffer
mu sync.Mutex
}
func (w *singleWriter) Write(p []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
return w.b.Write(p)
}
// CombinedOutput runs cmd on the remote host and returns its combined
// standard output and standard error.
func (s *Session) CombinedOutput(cmd string) ([]byte, error) {
if s.Stdout != nil {
return nil, errors.New("ssh: Stdout already set")
}
if s.Stderr != nil {
return nil, errors.New("ssh: Stderr already set")
}
var b singleWriter
s.Stdout = &b
s.Stderr = &b
err := s.Run(cmd)
return b.b.Bytes(), err
}
// Shell starts a login shell on the remote host. A Session only
// accepts one call to Run, Start, Shell, Output, or CombinedOutput.
func (s *Session) Shell() error {
if s.started {
return errors.New("ssh: session already started")
}
ok, err := s.ch.SendRequest("shell", true, nil)
if err == nil && !ok {
return errors.New("ssh: could not start shell")
}
if err != nil {
return err
}
return s.start()
}
func (s *Session) start() error {
s.started = true
type F func(*Session)
for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} {
setupFd(s)
}
s.errors = make(chan error, len(s.copyFuncs))
for _, fn := range s.copyFuncs {
go func(fn func() error) {
s.errors <- fn()
}(fn)
}
return nil
}
// Wait waits for the remote command to exit.
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
// If the remote server does not send an exit status, an error of type
// *ExitMissingError is returned. If the command completes
// unsuccessfully or is interrupted by a signal, the error is of type
// *ExitError. Other error types may be returned for I/O problems.
func (s *Session) Wait() error {
if !s.started {
return errors.New("ssh: session not started")
}
waitErr := <-s.exitStatus
if s.stdinPipeWriter != nil {
s.stdinPipeWriter.Close()
}
var copyError error
for range s.copyFuncs {
if err := <-s.errors; err != nil && copyError == nil {
copyError = err
}
}
if waitErr != nil {
return waitErr
}
return copyError
}
func (s *Session) wait(reqs <-chan *Request) error {
wm := Waitmsg{status: -1}
// Wait for msg channel to be closed before returning.
for msg := range reqs {
switch msg.Type {
case "exit-status":
wm.status = int(binary.BigEndian.Uint32(msg.Payload))
case "exit-signal":
var sigval struct {
Signal string
CoreDumped bool
Error string
Lang string
}
if err := Unmarshal(msg.Payload, &sigval); err != nil {
return err
}
// Must sanitize strings?
wm.signal = sigval.Signal
wm.msg = sigval.Error
wm.lang = sigval.Lang
default:
// This handles keepalives and matches
// OpenSSH's behaviour.
if msg.WantReply {
msg.Reply(false, nil)
}
}
}
if wm.status == 0 {
return nil
}
if wm.status == -1 {
// exit-status was never sent from server
if wm.signal == "" {
// signal was not sent either. RFC 4254
// section 6.10 recommends against this
// behavior, but it is allowed, so we let
// clients handle it.
return &ExitMissingError{}
}
wm.status = 128
if _, ok := signals[Signal(wm.signal)]; ok {
wm.status += signals[Signal(wm.signal)]
}
}
return &ExitError{wm}
}
// ExitMissingError is returned if a session is torn down cleanly, but
// the server sends no confirmation of the exit status.
type ExitMissingError struct{}
func (e *ExitMissingError) Error() string {
return "wait: remote command exited without exit status or exit signal"
}
func (s *Session) stdin() {
if s.stdinpipe {
return
}
var stdin io.Reader
if s.Stdin == nil {
stdin = new(bytes.Buffer)
} else {
r, w := io.Pipe()
go func() {
_, err := io.Copy(w, s.Stdin)
w.CloseWithError(err)
}()
stdin, s.stdinPipeWriter = r, w
}
s.copyFuncs = append(s.copyFuncs, func() error {
_, err := io.Copy(s.ch, stdin)
if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF {
err = err1
}
return err
})
}
func (s *Session) stdout() {
if s.stdoutpipe {
return
}
if s.Stdout == nil {
s.Stdout = ioutil.Discard
}
s.copyFuncs = append(s.copyFuncs, func() error {
_, err := io.Copy(s.Stdout, s.ch)
return err
})
}
func (s *Session) stderr() {
if s.stderrpipe {
return
}
if s.Stderr == nil {
s.Stderr = ioutil.Discard
}
s.copyFuncs = append(s.copyFuncs, func() error {
_, err := io.Copy(s.Stderr, s.ch.Stderr())
return err
})
}
// sessionStdin reroutes Close to CloseWrite.
type sessionStdin struct {
io.Writer
ch Channel
}
func (s *sessionStdin) Close() error {
return s.ch.CloseWrite()
}
// StdinPipe returns a pipe that will be connected to the
// remote command's standard input when the command starts.
func (s *Session) StdinPipe() (io.WriteCloser, error) {
if s.Stdin != nil {
return nil, errors.New("ssh: Stdin already set")
}
if s.started {
return nil, errors.New("ssh: StdinPipe after process started")
}
s.stdinpipe = true
return &sessionStdin{s.ch, s.ch}, nil
}
// StdoutPipe returns a pipe that will be connected to the
// remote command's standard output when the command starts.
// There is a fixed amount of buffering that is shared between
// stdout and stderr streams. If the StdoutPipe reader is
// not serviced fast enough it may eventually cause the
// remote command to block.
func (s *Session) StdoutPipe() (io.Reader, error) {
if s.Stdout != nil {
return nil, errors.New("ssh: Stdout already set")
}
if s.started {
return nil, errors.New("ssh: StdoutPipe after process started")
}
s.stdoutpipe = true
return s.ch, nil
}
// StderrPipe returns a pipe that will be connected to the
// remote command's standard error when the command starts.
// There is a fixed amount of buffering that is shared between
// stdout and stderr streams. If the StderrPipe reader is
// not serviced fast enough it may eventually cause the
// remote command to block.
func (s *Session) StderrPipe() (io.Reader, error) {
if s.Stderr != nil {
return nil, errors.New("ssh: Stderr already set")
}
if s.started {
return nil, errors.New("ssh: StderrPipe after process started")
}
s.stderrpipe = true
return s.ch.Stderr(), nil
}
// newSession returns a new interactive session on the remote host.
func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
s := &Session{
ch: ch,
}
s.exitStatus = make(chan error, 1)
go func() {
s.exitStatus <- s.wait(reqs)
}()
return s, nil
}
// An ExitError reports unsuccessful completion of a remote command.
type ExitError struct {
Waitmsg
}
func (e *ExitError) Error() string {
return e.Waitmsg.String()
}
// Waitmsg stores the information about an exited remote command
// as reported by Wait.
type Waitmsg struct {
status int
signal string
msg string
lang string
}
// ExitStatus returns the exit status of the remote command.
func (w Waitmsg) ExitStatus() int {
return w.status
}
// Signal returns the exit signal of the remote command if
// it was terminated violently.
func (w Waitmsg) Signal() string {
return w.signal
}
// Msg returns the exit message given by the remote command.
func (w Waitmsg) Msg() string {
return w.msg
}
// Lang returns the language tag. See RFC 3066
func (w Waitmsg) Lang() string {
return w.lang
}
func (w Waitmsg) String() string {
str := fmt.Sprintf("Process exited with status %v", w.status)
if w.signal != "" {
str += fmt.Sprintf(" from signal %v", w.signal)
}
if w.msg != "" {
str += fmt.Sprintf(". Reason was: %v", w.msg)
}
return str
}
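
A minimal usage sketch of the session API above (not part of the vendored file): it assumes it compiles in package ssh, that the *Client comes from ssh.Dial or NewClient defined elsewhere in this package, and that bytes and fmt are imported as they are in this file.

func runRemote(client *Client, cmd string) (string, error) {
	session, err := client.NewSession()
	if err != nil {
		return "", err
	}
	defer session.Close()

	var out bytes.Buffer
	session.Stdout = &out // wired to the channel by stdout() when the command starts
	err = session.Run(cmd)
	if exitErr, ok := err.(*ExitError); ok {
		// The command ran but exited non-zero; its Waitmsg carries the details.
		return out.String(), fmt.Errorf("%q failed: status=%d signal=%q", cmd, exitErr.ExitStatus(), exitErr.Signal())
	}
	return out.String(), err // nil, *ExitMissingError, or a transport error
}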

407
vendor/golang.org/x/crypto/ssh/tcpip.go generated vendored Normal file

@ -0,0 +1,407 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"errors"
"fmt"
"io"
"math/rand"
"net"
"strconv"
"strings"
"sync"
"time"
)
// Listen requests the remote peer open a listening socket on
// addr. Incoming connections will be available by calling Accept on
// the returned net.Listener. The listener must be serviced, or the
// SSH connection may hang.
func (c *Client) Listen(n, addr string) (net.Listener, error) {
laddr, err := net.ResolveTCPAddr(n, addr)
if err != nil {
return nil, err
}
return c.ListenTCP(laddr)
}
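
A hedged sketch of reverse port forwarding with Listen (not part of the vendored file): the remote peer listens on 127.0.0.1:8080 and every accepted connection is handed to a caller-supplied handler. client is assumed to be a connected *Client, and the code is written as if it lived in package ssh.

func reverseForward(client *Client, handle func(net.Conn)) error {
	ln, err := client.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		return err
	}
	defer ln.Close()
	for {
		// Keep calling Accept so the SSH connection stays serviced.
		conn, err := ln.Accept()
		if err != nil {
			return err
		}
		go handle(conn)
	}
}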
// Automatic port allocation is broken with OpenSSH before 6.0. See
// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In
// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0,
// rather than the actual port number. This means you can never open
// two different listeners with auto allocated ports. We work around
// this by trying explicit ports until we succeed.
const openSSHPrefix = "OpenSSH_"
var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano()))
// isBrokenOpenSSHVersion returns true if the given version string
// specifies a version of OpenSSH that is known to have a bug in port
// forwarding.
func isBrokenOpenSSHVersion(versionStr string) bool {
i := strings.Index(versionStr, openSSHPrefix)
if i < 0 {
return false
}
i += len(openSSHPrefix)
j := i
for ; j < len(versionStr); j++ {
if versionStr[j] < '0' || versionStr[j] > '9' {
break
}
}
version, _ := strconv.Atoi(versionStr[i:j])
return version < 6
}
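
A small sanity sketch of the version check above (not part of the vendored file), written as if in package ssh:

func exampleBrokenVersions() {
	fmt.Println(isBrokenOpenSSHVersion("SSH-2.0-OpenSSH_5.9")) // true: auto port allocation is broken
	fmt.Println(isBrokenOpenSSHVersion("SSH-2.0-OpenSSH_6.1")) // false
	fmt.Println(isBrokenOpenSSHVersion("SSH-2.0-Go"))          // false: not OpenSSH at all
}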
// autoPortListenWorkaround simulates automatic port allocation by
// trying random ports repeatedly.
func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) {
var sshListener net.Listener
var err error
const tries = 10
for i := 0; i < tries; i++ {
addr := *laddr
addr.Port = 1024 + portRandomizer.Intn(60000)
sshListener, err = c.ListenTCP(&addr)
if err == nil {
laddr.Port = addr.Port
return sshListener, err
}
}
return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err)
}
// RFC 4254 7.1
type channelForwardMsg struct {
addr string
rport uint32
}
// ListenTCP requests the remote peer open a listening socket
// on laddr. Incoming connections will be available by calling
// Accept on the returned net.Listener.
func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
return c.autoPortListenWorkaround(laddr)
}
m := channelForwardMsg{
laddr.IP.String(),
uint32(laddr.Port),
}
// send message
ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
if err != nil {
return nil, err
}
if !ok {
return nil, errors.New("ssh: tcpip-forward request denied by peer")
}
// If the original port was 0, then the remote side will
// supply a real port number in the response.
if laddr.Port == 0 {
var p struct {
Port uint32
}
if err := Unmarshal(resp, &p); err != nil {
return nil, err
}
laddr.Port = int(p.Port)
}
// Register this forward, using the port number we obtained.
ch := c.forwards.add(*laddr)
return &tcpListener{laddr, c, ch}, nil
}
// forwardList stores a mapping between remote
// forward requests and the tcpListeners.
type forwardList struct {
sync.Mutex
entries []forwardEntry
}
// forwardEntry represents an established mapping of a laddr on a
// remote ssh server to a channel connected to a tcpListener.
type forwardEntry struct {
laddr net.TCPAddr
c chan forward
}
// forward represents an incoming forwarded tcpip connection. The
// arguments to add/remove/lookup should be address as specified in
// the original forward-request.
type forward struct {
newCh NewChannel // the ssh client channel underlying this forward
raddr *net.TCPAddr // the raddr of the incoming connection
}
func (l *forwardList) add(addr net.TCPAddr) chan forward {
l.Lock()
defer l.Unlock()
f := forwardEntry{
addr,
make(chan forward, 1),
}
l.entries = append(l.entries, f)
return f.c
}
// See RFC 4254, section 7.2
type forwardedTCPPayload struct {
Addr string
Port uint32
OriginAddr string
OriginPort uint32
}
// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr.
func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
if port == 0 || port > 65535 {
return nil, fmt.Errorf("ssh: port number out of range: %d", port)
}
ip := net.ParseIP(string(addr))
if ip == nil {
return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
}
return &net.TCPAddr{IP: ip, Port: int(port)}, nil
}
func (l *forwardList) handleChannels(in <-chan NewChannel) {
for ch := range in {
var payload forwardedTCPPayload
if err := Unmarshal(ch.ExtraData(), &payload); err != nil {
ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error())
continue
}
// RFC 4254 section 7.2 specifies that incoming
// addresses should list the address, in string
// format. It is implied that this should be an IP
// address, as it would be impossible to connect to it
// otherwise.
laddr, err := parseTCPAddr(payload.Addr, payload.Port)
if err != nil {
ch.Reject(ConnectionFailed, err.Error())
continue
}
raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort)
if err != nil {
ch.Reject(ConnectionFailed, err.Error())
continue
}
if ok := l.forward(*laddr, *raddr, ch); !ok {
// Section 7.2, implementations MUST reject spurious incoming
// connections.
ch.Reject(Prohibited, "no forward for address")
continue
}
}
}
// remove removes the forward entry, and the channel feeding its
// listener.
func (l *forwardList) remove(addr net.TCPAddr) {
l.Lock()
defer l.Unlock()
for i, f := range l.entries {
if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port {
l.entries = append(l.entries[:i], l.entries[i+1:]...)
close(f.c)
return
}
}
}
// closeAll closes and clears all forwards.
func (l *forwardList) closeAll() {
l.Lock()
defer l.Unlock()
for _, f := range l.entries {
close(f.c)
}
l.entries = nil
}
func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool {
l.Lock()
defer l.Unlock()
for _, f := range l.entries {
if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port {
f.c <- forward{ch, &raddr}
return true
}
}
return false
}
type tcpListener struct {
laddr *net.TCPAddr
conn *Client
in <-chan forward
}
// Accept waits for and returns the next connection to the listener.
func (l *tcpListener) Accept() (net.Conn, error) {
s, ok := <-l.in
if !ok {
return nil, io.EOF
}
ch, incoming, err := s.newCh.Accept()
if err != nil {
return nil, err
}
go DiscardRequests(incoming)
return &tcpChanConn{
Channel: ch,
laddr: l.laddr,
raddr: s.raddr,
}, nil
}
// Close closes the listener.
func (l *tcpListener) Close() error {
m := channelForwardMsg{
l.laddr.IP.String(),
uint32(l.laddr.Port),
}
// this also closes the listener.
l.conn.forwards.remove(*l.laddr)
ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
if err == nil && !ok {
err = errors.New("ssh: cancel-tcpip-forward failed")
}
return err
}
// Addr returns the listener's network address.
func (l *tcpListener) Addr() net.Addr {
return l.laddr
}
// Dial initiates a connection to the addr from the remote host.
// The resulting connection has a zero LocalAddr() and RemoteAddr().
func (c *Client) Dial(n, addr string) (net.Conn, error) {
// Parse the address into host and numeric port.
host, portString, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
port, err := strconv.ParseUint(portString, 10, 16)
if err != nil {
return nil, err
}
// Use a zero address for local and remote address.
zeroAddr := &net.TCPAddr{
IP: net.IPv4zero,
Port: 0,
}
ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port))
if err != nil {
return nil, err
}
return &tcpChanConn{
Channel: ch,
laddr: zeroAddr,
raddr: zeroAddr,
}, nil
}
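
Since Dial has the same shape as net.Dial, it can be plugged straight into an http.Transport to tunnel HTTP over the SSH connection. An illustrative sketch (not part of the vendored file); it assumes net/http is imported, which this file does not do.

func httpOverSSH(client *Client) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			// Every outgoing connection becomes a direct-tcpip channel.
			Dial: client.Dial,
		},
	}
}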
// DialTCP connects to the remote address raddr on the network net,
// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
// as the local address for the connection.
func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
if laddr == nil {
laddr = &net.TCPAddr{
IP: net.IPv4zero,
Port: 0,
}
}
ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
if err != nil {
return nil, err
}
return &tcpChanConn{
Channel: ch,
laddr: laddr,
raddr: raddr,
}, nil
}
// RFC 4254 7.2
type channelOpenDirectMsg struct {
raddr string
rport uint32
laddr string
lport uint32
}
func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
msg := channelOpenDirectMsg{
raddr: raddr,
rport: uint32(rport),
laddr: laddr,
lport: uint32(lport),
}
ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
if err != nil {
return nil, err
}
go DiscardRequests(in)
return ch, err
}
type tcpChan struct {
Channel // the backing channel
}
// tcpChanConn fulfills the net.Conn interface without
// the tcpChan having to hold laddr or raddr directly.
type tcpChanConn struct {
Channel
laddr, raddr net.Addr
}
// LocalAddr returns the local network address.
func (t *tcpChanConn) LocalAddr() net.Addr {
return t.laddr
}
// RemoteAddr returns the remote network address.
func (t *tcpChanConn) RemoteAddr() net.Addr {
return t.raddr
}
// SetDeadline sets the read and write deadlines associated
// with the connection.
func (t *tcpChanConn) SetDeadline(deadline time.Time) error {
if err := t.SetReadDeadline(deadline); err != nil {
return err
}
return t.SetWriteDeadline(deadline)
}
// SetReadDeadline sets the read deadline.
// A zero value for t means Read will not time out.
// After the deadline, the error from Read will implement net.Error
// with Timeout() == true.
func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error {
return errors.New("ssh: tcpChan: deadline not supported")
}
// SetWriteDeadline exists to satisfy the net.Conn interface
// but is not implemented by this type. It always returns an error.
func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error {
return errors.New("ssh: tcpChan: deadline not supported")
}
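
A sketch of classic local port forwarding built on DialTCP (not part of the vendored file): accept local connections and splice each one onto a direct-tcpip channel to raddr. Assumes a connected *Client and that it compiles in package ssh.

func forwardLocal(client *Client, localAddr string, raddr *net.TCPAddr) error {
	ln, err := net.Listen("tcp", localAddr)
	if err != nil {
		return err
	}
	defer ln.Close()
	for {
		local, err := ln.Accept()
		if err != nil {
			return err
		}
		remote, err := client.DialTCP("tcp", nil, raddr)
		if err != nil {
			local.Close()
			return err
		}
		go func() {
			defer local.Close()
			defer remote.Close()
			go io.Copy(remote, local) // local -> remote
			io.Copy(local, remote)    // remote -> local
		}()
	}
}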

375
vendor/golang.org/x/crypto/ssh/transport.go generated vendored Normal file

@ -0,0 +1,375 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bufio"
"errors"
"io"
"log"
)
// debugTransport if set, will print packet types as they go over the
// wire. No message decoding is done, to minimize the impact on timing.
const debugTransport = false
const (
gcmCipherID = "aes128-gcm@openssh.com"
aes128cbcID = "aes128-cbc"
tripledescbcID = "3des-cbc"
)
// packetConn represents a transport that implements packet based
// operations.
type packetConn interface {
// Encrypt and send a packet of data to the remote peer.
writePacket(packet []byte) error
// Read a packet from the connection. The read is blocking,
// i.e. if error is nil, then the returned byte slice is
// always non-empty.
readPacket() ([]byte, error)
// Close closes the write-side of the connection.
Close() error
}
// transport is the keyingTransport that implements the SSH packet
// protocol.
type transport struct {
reader connectionState
writer connectionState
bufReader *bufio.Reader
bufWriter *bufio.Writer
rand io.Reader
isClient bool
io.Closer
}
// packetCipher represents a combination of SSH encryption/MAC
// protocol. A single instance should be used for one direction only.
type packetCipher interface {
// writePacket encrypts the packet and writes it to w. The
// contents of the packet are generally scrambled.
writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error
// readPacket reads and decrypts a packet of data. The
// returned packet may be overwritten by future calls of
// readPacket.
readPacket(seqnum uint32, r io.Reader) ([]byte, error)
}
// connectionState represents one side (read or write) of the
// connection. This is necessary because each direction has its own
// keys, and can even have its own algorithms
type connectionState struct {
packetCipher
seqNum uint32
dir direction
pendingKeyChange chan packetCipher
}
// prepareKeyChange sets up key material for a keychange. The key changes in
// both directions are triggered by reading and writing a msgNewKey packet
// respectively.
func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
return err
} else {
t.reader.pendingKeyChange <- ciph
}
if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
return err
} else {
t.writer.pendingKeyChange <- ciph
}
return nil
}
func (t *transport) printPacket(p []byte, write bool) {
if len(p) == 0 {
return
}
who := "server"
if t.isClient {
who = "client"
}
what := "read"
if write {
what = "write"
}
log.Println(what, who, p[0])
}
// Read and decrypt next packet.
func (t *transport) readPacket() (p []byte, err error) {
for {
p, err = t.reader.readPacket(t.bufReader)
if err != nil {
break
}
if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
break
}
}
if debugTransport {
t.printPacket(p, false)
}
return p, err
}
func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
packet, err := s.packetCipher.readPacket(s.seqNum, r)
s.seqNum++
if err == nil && len(packet) == 0 {
err = errors.New("ssh: zero length packet")
}
if len(packet) > 0 {
switch packet[0] {
case msgNewKeys:
select {
case cipher := <-s.pendingKeyChange:
s.packetCipher = cipher
default:
return nil, errors.New("ssh: got bogus newkeys message.")
}
case msgDisconnect:
// Transform a disconnect message into an
// error. Since this is lowest level at which
// we interpret message types, doing it here
// ensures that we don't have to handle it
// elsewhere.
var msg disconnectMsg
if err := Unmarshal(packet, &msg); err != nil {
return nil, err
}
return nil, &msg
}
}
// The packet may point to an internal buffer, so copy the
// packet out here.
fresh := make([]byte, len(packet))
copy(fresh, packet)
return fresh, err
}
func (t *transport) writePacket(packet []byte) error {
if debugTransport {
t.printPacket(packet, true)
}
return t.writer.writePacket(t.bufWriter, t.rand, packet)
}
func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
if err != nil {
return err
}
if err = w.Flush(); err != nil {
return err
}
s.seqNum++
if changeKeys {
select {
case cipher := <-s.pendingKeyChange:
s.packetCipher = cipher
default:
panic("ssh: no key material for msgNewKeys")
}
}
return err
}
func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
t := &transport{
bufReader: bufio.NewReader(rwc),
bufWriter: bufio.NewWriter(rwc),
rand: rand,
reader: connectionState{
packetCipher: &streamPacketCipher{cipher: noneCipher{}},
pendingKeyChange: make(chan packetCipher, 1),
},
writer: connectionState{
packetCipher: &streamPacketCipher{cipher: noneCipher{}},
pendingKeyChange: make(chan packetCipher, 1),
},
Closer: rwc,
}
t.isClient = isClient
if isClient {
t.reader.dir = serverKeys
t.writer.dir = clientKeys
} else {
t.reader.dir = clientKeys
t.writer.dir = serverKeys
}
return t
}
type direction struct {
ivTag []byte
keyTag []byte
macKeyTag []byte
}
var (
serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
)
// generateKeys generates key material for IV, MAC and encryption.
func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
cipherMode := cipherModes[algs.Cipher]
macMode := macModes[algs.MAC]
iv = make([]byte, cipherMode.ivSize)
key = make([]byte, cipherMode.keySize)
macKey = make([]byte, macMode.keySize)
generateKeyMaterial(iv, d.ivTag, kex)
generateKeyMaterial(key, d.keyTag, kex)
generateKeyMaterial(macKey, d.macKeyTag, kex)
return
}
// newPacketCipher returns a packetCipher whose cipher and MAC keys are derived
// from kex.K, kex.H and sessionId, as described in RFC 4253, section 6.4.
// direction should either be serverKeys (to set up server->client keys) or
// clientKeys (for client->server keys).
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
iv, key, macKey := generateKeys(d, algs, kex)
if algs.Cipher == gcmCipherID {
return newGCMCipher(iv, key, macKey)
}
if algs.Cipher == aes128cbcID {
return newAESCBCCipher(iv, key, macKey, algs)
}
if algs.Cipher == tripledescbcID {
return newTripleDESCBCCipher(iv, key, macKey, algs)
}
c := &streamPacketCipher{
mac: macModes[algs.MAC].new(macKey),
etm: macModes[algs.MAC].etm,
}
c.macResult = make([]byte, c.mac.Size())
var err error
c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
if err != nil {
return nil, err
}
return c, nil
}
// generateKeyMaterial fills out with key material generated from tag, K, H
// and sessionId, as specified in RFC 4253, section 7.2.
func generateKeyMaterial(out, tag []byte, r *kexResult) {
var digestsSoFar []byte
h := r.Hash.New()
for len(out) > 0 {
h.Reset()
h.Write(r.K)
h.Write(r.H)
if len(digestsSoFar) == 0 {
h.Write(tag)
h.Write(r.SessionID)
} else {
h.Write(digestsSoFar)
}
digest := h.Sum(nil)
n := copy(out, digest)
out = out[n:]
if len(out) > 0 {
digestsSoFar = append(digestsSoFar, digest...)
}
}
}
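
A hypothetical sketch of the RFC 4253 section 7.2 derivation above (not part of the vendored file): it produces 32 bytes of client-to-server encryption key material (tag 'C', matching clientKeys above). It assumes the crypto package is imported, that crypto/sha256 is linked in elsewhere in the program, and that kexResult's Hash field is a crypto.Hash as its use via r.Hash.New() suggests; k, h and sessionID stand in for values a real key exchange would supply.

func exampleKeyMaterial(k, h, sessionID []byte) []byte {
	kex := &kexResult{
		Hash:      crypto.SHA256,
		K:         k,         // shared secret, mpint-encoded
		H:         h,         // exchange hash
		SessionID: sessionID, // H from the first key exchange
	}
	key := make([]byte, 32)
	generateKeyMaterial(key, []byte{'C'}, kex)
	return key
}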
const packageVersion = "SSH-2.0-Go"
// Sends and receives a version line. The versionLine string should
// be US ASCII, start with "SSH-2.0-", and should not include a
// newline. exchangeVersions returns the other side's version line.
func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
// Contrary to the RFC, we do not ignore lines that don't
// start with "SSH-2.0-" to make the library usable with
// nonconforming servers.
for _, c := range versionLine {
// The spec disallows non US-ASCII chars, and
// specifically forbids null chars.
if c < 32 {
return nil, errors.New("ssh: junk character in version line")
}
}
if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil {
return
}
them, err = readVersion(rw)
return them, err
}
// maxVersionStringBytes is the maximum number of bytes that we'll
// accept as a version string. RFC 4253 section 4.2 limits this at 255
// chars
const maxVersionStringBytes = 255
// Read version string as specified by RFC 4253, section 4.2.
func readVersion(r io.Reader) ([]byte, error) {
versionString := make([]byte, 0, 64)
var ok bool
var buf [1]byte
for len(versionString) < maxVersionStringBytes {
_, err := io.ReadFull(r, buf[:])
if err != nil {
return nil, err
}
// The RFC says that the version should be terminated with \r\n
// but several SSH servers actually only send a \n.
if buf[0] == '\n' {
ok = true
break
}
// non ASCII chars are disallowed, but we are lenient,
// since Go doesn't use null-terminated strings.
// The RFC allows a comment after a space, however,
// all of it (version and comments) goes into the
// session hash.
versionString = append(versionString, buf[0])
}
if !ok {
return nil, errors.New("ssh: overflow reading version string")
}
// There might be a '\r' on the end which we should remove.
if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
versionString = versionString[:len(versionString)-1]
}
return versionString, nil
}
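
A small sketch of readVersion's behaviour (not part of the vendored file), written as if in package ssh; it assumes bytes and fmt are imported, which transport.go itself does not do.

func exampleReadVersion() {
	v, err := readVersion(bytes.NewReader([]byte("SSH-2.0-OpenSSH_7.4\r\n")))
	fmt.Printf("%q %v\n", v, err) // "SSH-2.0-OpenSSH_7.4" <nil>: the trailing \r is stripped
}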

59238
vendor/google.golang.org/api/compute/v1/compute-gen.go generated vendored Normal file

File diff suppressed because it is too large


@ -1,38 +0,0 @@
// Copyright 2012 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transport contains HTTP transports used to make
// authenticated API requests.
package transport
import (
"errors"
"net/http"
)
// APIKey is an HTTP Transport which wraps an underlying transport and
// appends an API Key "key" parameter to the URL of outgoing requests.
type APIKey struct {
// Key is the API Key to set on requests.
Key string
// Transport is the underlying HTTP transport.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
}
func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.Transport
if rt == nil {
rt = http.DefaultTransport
if rt == nil {
return nil, errors.New("googleapi/transport: no Transport specified or available")
}
}
newReq := *req
args := newReq.URL.Query()
args.Set("key", t.Key)
newReq.URL.RawQuery = args.Encode()
return rt.RoundTrip(&newReq)
}
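
An illustrative sketch (not part of the vendored file) of wiring the APIKey transport into an http.Client so every request carries a key query parameter; the key value is a placeholder.

func newKeyedClient() *http.Client {
	return &http.Client{
		Transport: &APIKey{
			Key: "YOUR_API_KEY", // placeholder
			// Transport left nil, so http.DefaultTransport is used.
		},
	}
}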


@ -1,59 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"errors"
"google.golang.org/grpc/naming"
)
// PoolResolver provides a fixed list of addresses to load balance between
// and does not provide further updates.
type PoolResolver struct {
poolSize int
dialOpt *DialSettings
ch chan []*naming.Update
}
// NewPoolResolver returns a PoolResolver.
// This is an EXPERIMENTAL API and may be changed or removed in the future.
func NewPoolResolver(size int, o *DialSettings) *PoolResolver {
return &PoolResolver{poolSize: size, dialOpt: o}
}
// Resolve returns a Watcher for the endpoint defined by the DialSettings
// provided to NewPoolResolver.
func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) {
if r.dialOpt.Endpoint == "" {
return nil, errors.New("No endpoint configured")
}
addrs := make([]*naming.Update, 0, r.poolSize)
for i := 0; i < r.poolSize; i++ {
addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i})
}
r.ch = make(chan []*naming.Update, 1)
r.ch <- addrs
return r, nil
}
// Next returns a static list of updates on the first call,
// and blocks indefinitely until Close is called on subsequent calls.
func (r *PoolResolver) Next() ([]*naming.Update, error) {
return <-r.ch, nil
}
func (r *PoolResolver) Close() {
close(r.ch)
}


@ -1,37 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal supports the options and transport packages.
package internal
import (
"net/http"
"golang.org/x/oauth2"
"google.golang.org/grpc"
)
// DialSettings holds information needed to establish a connection with a
// Google API service.
type DialSettings struct {
Endpoint string
Scopes []string
ServiceAccountJSONFilename string // if set, TokenSource is ignored.
TokenSource oauth2.TokenSource
UserAgent string
APIKey string
HTTPClient *http.Client
GRPCDialOpts []grpc.DialOption
GRPCConn *grpc.ClientConn
}


@ -1,231 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package iterator provides support for standard Google API iterators.
// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines.
package iterator
import (
"errors"
"fmt"
"reflect"
)
// Done is returned by an iterator's Next method when the iteration is
// complete; when there are no more items to return.
var Done = errors.New("no more items in iterator")
// We don't support mixed calls to Next and NextPage because they play
// with the paging state in incompatible ways.
var errMixed = errors.New("iterator: Next and NextPage called on same iterator")
// PageInfo contains information about an iterator's paging state.
type PageInfo struct {
// Token is the token used to retrieve the next page of items from the
// API. You may set Token immediately after creating an iterator to
// begin iteration at a particular point. If Token is the empty string,
// the iterator will begin with the first eligible item.
//
// The result of setting Token after the first call to Next is undefined.
//
// After the underlying API method is called to retrieve a page of items,
// Token is set to the next-page token in the response.
Token string
// MaxSize is the maximum number of items returned by a call to the API.
// Set MaxSize as a hint to optimize the buffering behavior of the iterator.
// If zero, the page size is determined by the underlying service.
//
// Use Pager to retrieve a page of a specific, exact size.
MaxSize int
// The error state of the iterator. Manipulated by PageInfo.next and Pager.
// This is a latch: it starts as nil, and once set should never change.
err error
// If true, no more calls to fetch should be made. Set to true when fetch
// returns an empty page token. The iterator is Done when this is true AND
// the buffer is empty.
atEnd bool
// Function that fetches a page from the underlying service. It should pass
// the pageSize and pageToken arguments to the service, fill the buffer
// with the results from the call, and return the next-page token returned
// by the service. The function must not remove any existing items from the
// buffer. If the underlying RPC takes an int32 page size, pageSize should
// be silently truncated.
fetch func(pageSize int, pageToken string) (nextPageToken string, err error)
// Function that returns the number of currently buffered items.
bufLen func() int
// Function that returns the buffer, after setting the buffer variable to nil.
takeBuf func() interface{}
// Set to true on first call to PageInfo.next or Pager.NextPage. Used to check
// for calls to both Next and NextPage with the same iterator.
nextCalled, nextPageCalled bool
}
// NewPageInfo exposes internals for iterator implementations.
// It is not a stable interface.
var NewPageInfo = newPageInfo
// If an iterator can support paging, its iterator-creating method should call
// this (via the NewPageInfo variable above).
//
// The fetch, bufLen and takeBuf arguments provide access to the
// iterator's internal slice of buffered items. They behave as described in
// PageInfo, above.
//
// The return value is the PageInfo.next method bound to the returned PageInfo value.
// (Returning it avoids exporting PageInfo.next.)
func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) {
pi := &PageInfo{
fetch: fetch,
bufLen: bufLen,
takeBuf: takeBuf,
}
return pi, pi.next
}
// Remaining returns the number of items available before the iterator makes another API call.
func (pi *PageInfo) Remaining() int { return pi.bufLen() }
// next provides support for an iterator's Next function. An iterator's Next
// should return the error returned by next if non-nil; else it can assume
// there is at least one item in its buffer, and it should return that item and
// remove it from the buffer.
func (pi *PageInfo) next() error {
pi.nextCalled = true
if pi.err != nil { // Once we get an error, always return it.
// TODO(jba): fix so users can retry on transient errors? Probably not worth it.
return pi.err
}
if pi.nextPageCalled {
pi.err = errMixed
return pi.err
}
// Loop until we get some items or reach the end.
for pi.bufLen() == 0 && !pi.atEnd {
if err := pi.fill(pi.MaxSize); err != nil {
pi.err = err
return pi.err
}
if pi.Token == "" {
pi.atEnd = true
}
}
// Either the buffer is non-empty or pi.atEnd is true (or both).
if pi.bufLen() == 0 {
// The buffer is empty and pi.atEnd is true, i.e. the service has no
// more items.
pi.err = Done
}
return pi.err
}
// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the
// next-page token returned by the call.
// If fill returns a non-nil error, the buffer will be empty.
func (pi *PageInfo) fill(size int) error {
tok, err := pi.fetch(size, pi.Token)
if err != nil {
pi.takeBuf() // clear the buffer
return err
}
pi.Token = tok
return nil
}
// Pageable is implemented by iterators that support paging.
type Pageable interface {
// PageInfo returns paging information associated with the iterator.
PageInfo() *PageInfo
}
// Pager supports retrieving iterator items a page at a time.
type Pager struct {
pageInfo *PageInfo
pageSize int
}
// NewPager returns a pager that uses iter. Calls to its NextPage method will
// obtain exactly pageSize items, unless fewer remain. The pageToken argument
// indicates where to start the iteration. Pass the empty string to start at
// the beginning, or pass a token retrieved from a call to Pager.NextPage.
//
// If you use an iterator with a Pager, you must not call Next on the iterator.
func NewPager(iter Pageable, pageSize int, pageToken string) *Pager {
p := &Pager{
pageInfo: iter.PageInfo(),
pageSize: pageSize,
}
p.pageInfo.Token = pageToken
if pageSize <= 0 {
p.pageInfo.err = errors.New("iterator: page size must be positive")
}
return p
}
// NextPage retrieves a sequence of items from the iterator and appends them
// to slicep, which must be a pointer to a slice of the iterator's item type.
// Exactly p.pageSize items will be appended, unless fewer remain.
//
// The first return value is the page token to use for the next page of items.
// If empty, there are no more pages. Aside from checking for the end of the
// iteration, the returned page token is only needed if the iteration is to be
// resumed at a later time, in another context (possibly another process).
//
// The second return value is non-nil if an error occurred. It will never be
// the special iterator sentinel value Done. To recognize the end of the
// iteration, compare nextPageToken to the empty string.
//
// It is possible for NextPage to return a single zero-length page along with
// an empty page token when there are no more items in the iteration.
func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) {
p.pageInfo.nextPageCalled = true
if p.pageInfo.err != nil {
return "", p.pageInfo.err
}
if p.pageInfo.nextCalled {
p.pageInfo.err = errMixed
return "", p.pageInfo.err
}
if p.pageInfo.bufLen() > 0 {
return "", errors.New("must call NextPage with an empty buffer")
}
// The buffer must be empty here, so takeBuf is a no-op. We call it just to get
// the buffer's type.
wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type())
if slicep == nil {
return "", errors.New("nil passed to Pager.NextPage")
}
vslicep := reflect.ValueOf(slicep)
if vslicep.Type() != wantSliceType {
return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep)
}
for p.pageInfo.bufLen() < p.pageSize {
if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil {
p.pageInfo.err = err
return "", p.pageInfo.err
}
if p.pageInfo.Token == "" {
break
}
}
e := vslicep.Elem()
e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf())))
return p.pageInfo.Token, nil
}
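
Two usage sketches (not part of the vendored file), written as if in package iterator. stringIterator is a hypothetical iterator in the style this package expects, whose Next reports Done at the end of the stream; firstPage additionally assumes the iterator buffers items of type string. Mixing Next and NextPage on one iterator yields errMixed, as documented above.

type stringIterator interface {
	Next() (string, error)
}

func drainAll(it stringIterator) ([]string, error) {
	var all []string
	for {
		s, err := it.Next()
		if err == Done {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		all = append(all, s)
	}
}

func firstPage(it Pageable) ([]string, string, error) {
	var page []string
	token, err := NewPager(it, 50, "").NextPage(&page) // exactly 50 items, unless fewer remain
	return page, token, err
}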


@ -1,156 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package option contains options for Google API clients.
package option
import (
"net/http"
"golang.org/x/oauth2"
"google.golang.org/api/internal"
"google.golang.org/grpc"
)
// A ClientOption is an option for a Google API client.
type ClientOption interface {
Apply(*internal.DialSettings)
}
// WithTokenSource returns a ClientOption that specifies an OAuth2 token
// source to be used as the basis for authentication.
func WithTokenSource(s oauth2.TokenSource) ClientOption {
return withTokenSource{s}
}
type withTokenSource struct{ ts oauth2.TokenSource }
func (w withTokenSource) Apply(o *internal.DialSettings) {
o.TokenSource = w.ts
}
// WithServiceAccountFile returns a ClientOption that uses a Google service
// account credentials file to authenticate.
// Use WithTokenSource with a token source created from
// golang.org/x/oauth2/google.JWTConfigFromJSON
// if reading the file from disk is not an option.
func WithServiceAccountFile(filename string) ClientOption {
return withServiceAccountFile(filename)
}
type withServiceAccountFile string
func (w withServiceAccountFile) Apply(o *internal.DialSettings) {
o.ServiceAccountJSONFilename = string(w)
}
// WithEndpoint returns a ClientOption that overrides the default endpoint
// to be used for a service.
func WithEndpoint(url string) ClientOption {
return withEndpoint(url)
}
type withEndpoint string
func (w withEndpoint) Apply(o *internal.DialSettings) {
o.Endpoint = string(w)
}
// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
// to be used for a service.
func WithScopes(scope ...string) ClientOption {
return withScopes(scope)
}
type withScopes []string
func (w withScopes) Apply(o *internal.DialSettings) {
s := make([]string, len(w))
copy(s, w)
o.Scopes = s
}
// WithUserAgent returns a ClientOption that sets the User-Agent.
func WithUserAgent(ua string) ClientOption {
return withUA(ua)
}
type withUA string
func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
// as the basis of communications. This option may only be used with services
// that support HTTP as their communication transport. When used, the
// WithHTTPClient option takes precedence over all other supplied options.
func WithHTTPClient(client *http.Client) ClientOption {
return withHTTPClient{client}
}
type withHTTPClient struct{ client *http.Client }
func (w withHTTPClient) Apply(o *internal.DialSettings) {
o.HTTPClient = w.client
}
// WithGRPCConn returns a ClientOption that specifies the gRPC client
// connection to use as the basis of communications. This option may only be
// used with services that support gRPC as their communication transport. When
// used, the WithGRPCConn option takes precedence over all other supplied
// options.
func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
return withGRPCConn{conn}
}
type withGRPCConn struct{ conn *grpc.ClientConn }
func (w withGRPCConn) Apply(o *internal.DialSettings) {
o.GRPCConn = w.conn
}
// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
// to an underlying gRPC dial. It does not work with WithGRPCConn.
func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
return withGRPCDialOption{opt}
}
type withGRPCDialOption struct{ opt grpc.DialOption }
func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
}
// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
// connections that requests will be balanced between.
// This is an EXPERIMENTAL API and may be changed or removed in the future.
func WithGRPCConnectionPool(size int) ClientOption {
return withGRPCConnectionPool(size)
}
type withGRPCConnectionPool int
func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
}
// WithAPIKey returns a ClientOption that specifies an API key to be used
// as the basis for authentication.
func WithAPIKey(apiKey string) ClientOption {
return withAPIKey(apiKey)
}
type withAPIKey string
func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }


@ -1,201 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transport supports network connections to HTTP and GRPC servers.
// This package is not intended for use by end developers. Use the
// google.golang.org/api/option package to configure API clients.
package transport
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/oauth"
gtransport "google.golang.org/api/googleapi/transport"
"google.golang.org/api/internal"
"google.golang.org/api/option"
)
// NewHTTPClient returns an HTTP client for use communicating with a Google cloud
// service, configured with the given ClientOptions. It also returns the endpoint
// for the service as specified in the options.
func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
var o internal.DialSettings
for _, opt := range opts {
opt.Apply(&o)
}
if o.GRPCConn != nil {
return nil, "", errors.New("unsupported gRPC connection specified")
}
// TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
if o.HTTPClient != nil {
return o.HTTPClient, o.Endpoint, nil
}
if o.APIKey != "" {
hc := &http.Client{
Transport: &gtransport.APIKey{
Key: o.APIKey,
Transport: userAgentTransport{
base: baseTransport(ctx),
userAgent: o.UserAgent,
},
},
}
return hc, o.Endpoint, nil
}
if o.ServiceAccountJSONFilename != "" {
ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...)
if err != nil {
return nil, "", err
}
o.TokenSource = ts
}
if o.TokenSource == nil {
var err error
o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
if err != nil {
return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err)
}
}
hc := &http.Client{
Transport: &oauth2.Transport{
Source: o.TokenSource,
Base: userAgentTransport{
base: baseTransport(ctx),
userAgent: o.UserAgent,
},
},
}
return hc, o.Endpoint, nil
}
type userAgentTransport struct {
userAgent string
base http.RoundTripper
}
func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.base
if rt == nil {
return nil, errors.New("transport: no Transport specified")
}
if t.userAgent == "" {
return rt.RoundTrip(req)
}
newReq := *req
newReq.Header = make(http.Header)
for k, vv := range req.Header {
newReq.Header[k] = vv
}
// TODO(cbro): append to existing User-Agent header?
newReq.Header["User-Agent"] = []string{t.userAgent}
return rt.RoundTrip(&newReq)
}
// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
var appengineDialerHook func(context.Context) grpc.DialOption
var appengineUrlfetchHook func(context.Context) http.RoundTripper
// baseTransport returns the base HTTP transport.
// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport.
func baseTransport(ctx context.Context) http.RoundTripper {
if appengineUrlfetchHook != nil {
return appengineUrlfetchHook(ctx)
}
return http.DefaultTransport
}
// DialGRPC returns a GRPC connection for use communicating with a Google cloud
// service, configured with the given ClientOptions.
func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
var o internal.DialSettings
for _, opt := range opts {
opt.Apply(&o)
}
if o.HTTPClient != nil {
return nil, errors.New("unsupported HTTP client specified")
}
if o.GRPCConn != nil {
return o.GRPCConn, nil
}
if o.ServiceAccountJSONFilename != "" {
ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...)
if err != nil {
return nil, err
}
o.TokenSource = ts
}
if o.TokenSource == nil {
var err error
o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
if err != nil {
return nil, fmt.Errorf("google.DefaultTokenSource: %v", err)
}
}
grpcOpts := []grpc.DialOption{
grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}),
grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
}
if appengineDialerHook != nil {
// Use the Socket API on App Engine.
grpcOpts = append(grpcOpts, appengineDialerHook(ctx))
}
grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
if o.UserAgent != "" {
grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
}
return grpc.DialContext(ctx, o.Endpoint, grpcOpts...)
}
func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("cannot read service account file: %v", err)
}
cfg, err := google.JWTConfigFromJSON(data, scope...)
if err != nil {
return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
}
return cfg.TokenSource(ctx), nil
}
// DialGRPCInsecure returns an insecure GRPC connection for use communicating
// with fake or mock Google cloud service implementations, such as emulators.
// The connection is configured with the given ClientOptions.
func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
var o internal.DialSettings
for _, opt := range opts {
opt.Apply(&o)
}
if o.HTTPClient != nil {
return nil, errors.New("unsupported HTTP client specified")
}
if o.GRPCConn != nil {
return o.GRPCConn, nil
}
grpcOpts := []grpc.DialOption{grpc.WithInsecure()}
grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
if o.UserAgent != "" {
grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
}
return grpc.DialContext(ctx, o.Endpoint, grpcOpts...)
}
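
A minimal sketch (not part of the vendored file) of how options from the option package feed into NewHTTPClient above, written as if in package transport; the key file path, scope and user agent are placeholders.

func exampleHTTPClient(ctx context.Context) (*http.Client, string, error) {
	return NewHTTPClient(ctx,
		option.WithServiceAccountFile("/path/to/service-account.json"), // placeholder
		option.WithScopes("https://www.googleapis.com/auth/devstorage.read_only"),
		option.WithUserAgent("example-agent/1.0"),
	)
}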


@ -1,40 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build appengine
package transport
import (
"net"
"net/http"
"time"
"golang.org/x/net/context"
"google.golang.org/appengine/socket"
"google.golang.org/appengine/urlfetch"
"google.golang.org/grpc"
)
func init() {
appengineDialerHook = func(ctx context.Context) grpc.DialOption {
return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return socket.DialTimeout(ctx, "tcp", addr, timeout)
})
}
appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper {
return &urlfetch.Transport{Context: ctx}
}
}


@ -1,180 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
// Package oauth implements gRPC credentials using OAuth.
package oauth
import (
"fmt"
"io/ioutil"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt"
"google.golang.org/grpc/credentials"
)
// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource.
type TokenSource struct {
oauth2.TokenSource
}
// GetRequestMetadata gets the request metadata as a map from a TokenSource.
func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
token, err := ts.Token()
if err != nil {
return nil, err
}
return map[string]string{
"authorization": token.Type() + " " + token.AccessToken,
}, nil
}
// RequireTransportSecurity indicates whether the credentials require transport security.
func (ts TokenSource) RequireTransportSecurity() bool {
return true
}
type jwtAccess struct {
jsonKey []byte
}
// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile.
func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) {
jsonKey, err := ioutil.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
}
return NewJWTAccessFromKey(jsonKey)
}
// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey.
func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) {
return jwtAccess{jsonKey}, nil
}
func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0])
if err != nil {
return nil, err
}
token, err := ts.Token()
if err != nil {
return nil, err
}
return map[string]string{
"authorization": token.TokenType + " " + token.AccessToken,
}, nil
}
func (j jwtAccess) RequireTransportSecurity() bool {
return true
}
// oauthAccess supplies PerRPCCredentials from a given token.
type oauthAccess struct {
token oauth2.Token
}
// NewOauthAccess constructs the PerRPCCredentials using a given token.
func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials {
return oauthAccess{token: *token}
}
func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
return map[string]string{
"authorization": oa.token.TokenType + " " + oa.token.AccessToken,
}, nil
}
func (oa oauthAccess) RequireTransportSecurity() bool {
return true
}
// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from
// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
// if your program is running on a GCE instance.
// TODO(dsymonds): Deprecate and remove this.
func NewComputeEngine() credentials.PerRPCCredentials {
return TokenSource{google.ComputeTokenSource("")}
}
// serviceAccount represents PerRPCCredentials via JWT signing key.
type serviceAccount struct {
config *jwt.Config
}
func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
token, err := s.config.TokenSource(ctx).Token()
if err != nil {
return nil, err
}
return map[string]string{
"authorization": token.TokenType + " " + token.AccessToken,
}, nil
}
func (s serviceAccount) RequireTransportSecurity() bool {
return true
}
// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice
// from a Google Developers service account.
func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) {
config, err := google.JWTConfigFromJSON(jsonKey, scope...)
if err != nil {
return nil, err
}
return serviceAccount{config: config}, nil
}
// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file
// of a Google Developers service account.
func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) {
jsonKey, err := ioutil.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
}
return NewServiceAccountFromKey(jsonKey, scope...)
}
// NewApplicationDefault returns "Application Default Credentials". For more
// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) {
t, err := google.DefaultTokenSource(ctx, scope...)
if err != nil {
return nil, err
}
return TokenSource{t}, nil
}
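
An illustrative sketch (not part of the vendored file) of attaching these per-RPC credentials to a gRPC connection, written as if in package oauth; it assumes google.golang.org/grpc is imported, and the key file, scope and target are placeholders.

func exampleDial(target, keyFile string) (*grpc.ClientConn, error) {
	perRPC, err := NewServiceAccountFromFile(keyFile, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		return nil, err
	}
	return grpc.Dial(target,
		grpc.WithPerRPCCredentials(perRPC),
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
	)
}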