Mirror of https://github.com/mudler/luet.git (synced 2025-09-05 09:10:43 +00:00)

Commit: update vendor
1 vendor/github.com/docker/distribution/manifest/doc.go (generated, vendored)
@@ -1 +0,0 @@
package manifest
287 vendor/github.com/docker/distribution/manifest/schema1/config_builder.go (generated, vendored)
@@ -1,287 +0,0 @@
|
||||
package schema1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha512"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
type diffID digest.Digest
|
||||
|
||||
// gzippedEmptyTar is a gzip-compressed version of an empty tar file
|
||||
// (1024 NULL bytes)
|
||||
var gzippedEmptyTar = []byte{
|
||||
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
|
||||
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
|
||||
}
|
||||
|
||||
// digestSHA256GzippedEmptyTar is the canonical sha256 digest of
|
||||
// gzippedEmptyTar
|
||||
const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
|
||||
|
||||
// configManifestBuilder is a type for constructing manifests from an image
|
||||
// configuration and generic descriptors.
|
||||
type configManifestBuilder struct {
|
||||
// bs is a BlobService used to create empty layer tars in the
|
||||
// blob store if necessary.
|
||||
bs distribution.BlobService
|
||||
// pk is the libtrust private key used to sign the final manifest.
|
||||
pk libtrust.PrivateKey
|
||||
// configJSON is configuration supplied when the ManifestBuilder was
|
||||
// created.
|
||||
configJSON []byte
|
||||
// ref contains the name and optional tag provided to NewConfigManifestBuilder.
|
||||
ref reference.Named
|
||||
// descriptors is the set of descriptors referencing the layers.
|
||||
descriptors []distribution.Descriptor
|
||||
// emptyTarDigest is set to a valid digest if an empty tar has been
|
||||
// put in the blob store; otherwise it is empty.
|
||||
emptyTarDigest digest.Digest
|
||||
}
|
||||
|
||||
// NewConfigManifestBuilder is used to build new manifests for the current
|
||||
// schema version from an image configuration and a set of descriptors.
|
||||
// It takes a BlobService so that it can add an empty tar to the blob store
|
||||
// if the resulting manifest needs empty layers.
|
||||
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder {
|
||||
return &configManifestBuilder{
|
||||
bs: bs,
|
||||
pk: pk,
|
||||
configJSON: configJSON,
|
||||
ref: ref,
|
||||
}
|
||||
}
|
||||
|
||||
// Build produces a final manifest from the given references
|
||||
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
|
||||
type imageRootFS struct {
|
||||
Type string `json:"type"`
|
||||
DiffIDs []diffID `json:"diff_ids,omitempty"`
|
||||
BaseLayer string `json:"base_layer,omitempty"`
|
||||
}
|
||||
|
||||
type imageHistory struct {
|
||||
Created time.Time `json:"created"`
|
||||
Author string `json:"author,omitempty"`
|
||||
CreatedBy string `json:"created_by,omitempty"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
EmptyLayer bool `json:"empty_layer,omitempty"`
|
||||
}
|
||||
|
||||
type imageConfig struct {
|
||||
RootFS *imageRootFS `json:"rootfs,omitempty"`
|
||||
History []imageHistory `json:"history,omitempty"`
|
||||
Architecture string `json:"architecture,omitempty"`
|
||||
}
|
||||
|
||||
var img imageConfig
|
||||
|
||||
if err := json.Unmarshal(mb.configJSON, &img); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(img.History) == 0 {
|
||||
return nil, errors.New("empty history when trying to create schema1 manifest")
|
||||
}
|
||||
|
||||
if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
|
||||
return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors)
|
||||
}
|
||||
|
||||
// Generate IDs for each layer
|
||||
// For non-top-level layers, create fake V1Compatibility strings that
|
||||
// fit the format and don't collide with anything else, but don't
|
||||
// result in runnable images on their own.
|
||||
type v1Compatibility struct {
|
||||
ID string `json:"id"`
|
||||
Parent string `json:"parent,omitempty"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
Created time.Time `json:"created"`
|
||||
ContainerConfig struct {
|
||||
Cmd []string
|
||||
} `json:"container_config,omitempty"`
|
||||
Author string `json:"author,omitempty"`
|
||||
ThrowAway bool `json:"throwaway,omitempty"`
|
||||
}
|
||||
|
||||
fsLayerList := make([]FSLayer, len(img.History))
|
||||
history := make([]History, len(img.History))
|
||||
|
||||
parent := ""
|
||||
layerCounter := 0
|
||||
for i, h := range img.History[:len(img.History)-1] {
|
||||
var blobsum digest.Digest
|
||||
if h.EmptyLayer {
|
||||
if blobsum, err = mb.emptyTar(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if len(img.RootFS.DiffIDs) <= layerCounter {
|
||||
return nil, errors.New("too many non-empty layers in History section")
|
||||
}
|
||||
blobsum = mb.descriptors[layerCounter].Digest
|
||||
layerCounter++
|
||||
}
|
||||
|
||||
v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
|
||||
|
||||
if i == 0 && img.RootFS.BaseLayer != "" {
|
||||
// windows-only baselayer setup
|
||||
baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
|
||||
parent = fmt.Sprintf("%x", baseID[:32])
|
||||
}
|
||||
|
||||
v1Compatibility := v1Compatibility{
|
||||
ID: v1ID,
|
||||
Parent: parent,
|
||||
Comment: h.Comment,
|
||||
Created: h.Created,
|
||||
Author: h.Author,
|
||||
}
|
||||
v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
|
||||
if h.EmptyLayer {
|
||||
v1Compatibility.ThrowAway = true
|
||||
}
|
||||
jsonBytes, err := json.Marshal(&v1Compatibility)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reversedIndex := len(img.History) - i - 1
|
||||
history[reversedIndex].V1Compatibility = string(jsonBytes)
|
||||
fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}
|
||||
|
||||
parent = v1ID
|
||||
}
|
||||
|
||||
latestHistory := img.History[len(img.History)-1]
|
||||
|
||||
var blobsum digest.Digest
|
||||
if latestHistory.EmptyLayer {
|
||||
if blobsum, err = mb.emptyTar(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if len(img.RootFS.DiffIDs) <= layerCounter {
|
||||
return nil, errors.New("too many non-empty layers in History section")
|
||||
}
|
||||
blobsum = mb.descriptors[layerCounter].Digest
|
||||
}
|
||||
|
||||
fsLayerList[0] = FSLayer{BlobSum: blobsum}
|
||||
dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
|
||||
|
||||
// Top-level v1compatibility string should be a modified version of the
|
||||
// image config.
|
||||
transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
history[0].V1Compatibility = string(transformedConfig)
|
||||
|
||||
tag := ""
|
||||
if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
|
||||
tag = tagged.Tag()
|
||||
}
|
||||
|
||||
mfst := Manifest{
|
||||
Versioned: manifest.Versioned{
|
||||
SchemaVersion: 1,
|
||||
},
|
||||
Name: mb.ref.Name(),
|
||||
Tag: tag,
|
||||
Architecture: img.Architecture,
|
||||
FSLayers: fsLayerList,
|
||||
History: history,
|
||||
}
|
||||
|
||||
return Sign(&mfst, mb.pk)
|
||||
}
|
||||
|
||||
// emptyTar pushes a compressed empty tar to the blob store if one doesn't
|
||||
// already exist, and returns its blobsum.
|
||||
func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
|
||||
if mb.emptyTarDigest != "" {
|
||||
// Already put an empty tar
|
||||
return mb.emptyTarDigest, nil
|
||||
}
|
||||
|
||||
descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
|
||||
switch err {
|
||||
case nil:
|
||||
mb.emptyTarDigest = descriptor.Digest
|
||||
return descriptor.Digest, nil
|
||||
case distribution.ErrBlobUnknown:
|
||||
// nop
|
||||
default:
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Add gzipped empty tar to the blob store
|
||||
descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
mb.emptyTarDigest = descriptor.Digest
|
||||
|
||||
return descriptor.Digest, nil
|
||||
}
|
||||
|
||||
// AppendReference adds a reference to the current ManifestBuilder
|
||||
func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
|
||||
descriptor := d.Descriptor()
|
||||
|
||||
if err := descriptor.Digest.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mb.descriptors = append(mb.descriptors, descriptor)
|
||||
return nil
|
||||
}
|
||||
|
||||
// References returns the current references added to this builder
|
||||
func (mb *configManifestBuilder) References() []distribution.Descriptor {
|
||||
return mb.descriptors
|
||||
}
|
||||
|
||||
// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON
|
||||
func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
|
||||
// Top-level v1compatibility string should be a modified version of the
|
||||
// image config.
|
||||
var configAsMap map[string]*json.RawMessage
|
||||
if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Delete fields that didn't exist in old manifest
|
||||
delete(configAsMap, "rootfs")
|
||||
delete(configAsMap, "history")
|
||||
configAsMap["id"] = rawJSON(v1ID)
|
||||
if parentV1ID != "" {
|
||||
configAsMap["parent"] = rawJSON(parentV1ID)
|
||||
}
|
||||
if throwaway {
|
||||
configAsMap["throwaway"] = rawJSON(true)
|
||||
}
|
||||
|
||||
return json.Marshal(configAsMap)
|
||||
}
|
||||
|
||||
func rawJSON(value interface{}) *json.RawMessage {
|
||||
jsonval, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return (*json.RawMessage)(&jsonval)
|
||||
}
|
184 vendor/github.com/docker/distribution/manifest/schema1/manifest.go (generated, vendored)
@@ -1,184 +0,0 @@
|
||||
package schema1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest"
|
||||
"github.com/docker/libtrust"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
// MediaTypeManifest specifies the mediaType for the current version. Note
|
||||
// that for schema version 1, the media is optionally "application/json".
|
||||
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
|
||||
// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version
|
||||
MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
|
||||
// MediaTypeManifestLayer specifies the media type for manifest layers
|
||||
MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
|
||||
)
|
||||
|
||||
var (
|
||||
// SchemaVersion provides a pre-initialized version structure for this
|
||||
// package's version of the manifest.
|
||||
SchemaVersion = manifest.Versioned{
|
||||
SchemaVersion: 1,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
|
||||
sm := new(SignedManifest)
|
||||
err := sm.UnmarshalJSON(b)
|
||||
if err != nil {
|
||||
return nil, distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
desc := distribution.Descriptor{
|
||||
Digest: digest.FromBytes(sm.Canonical),
|
||||
Size: int64(len(sm.Canonical)),
|
||||
MediaType: MediaTypeSignedManifest,
|
||||
}
|
||||
return sm, desc, err
|
||||
}
|
||||
err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Unable to register manifest: %s", err))
|
||||
}
|
||||
err = distribution.RegisterManifestSchema("", schema1Func)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Unable to register manifest: %s", err))
|
||||
}
|
||||
err = distribution.RegisterManifestSchema("application/json", schema1Func)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Unable to register manifest: %s", err))
|
||||
}
|
||||
}
|
||||
|
||||
// FSLayer is a container struct for BlobSums defined in an image manifest
|
||||
type FSLayer struct {
|
||||
// BlobSum is the tarsum of the referenced filesystem image layer
|
||||
BlobSum digest.Digest `json:"blobSum"`
|
||||
}
|
||||
|
||||
// History stores unstructured v1 compatibility information
|
||||
type History struct {
|
||||
// V1Compatibility is the raw v1 compatibility information
|
||||
V1Compatibility string `json:"v1Compatibility"`
|
||||
}
|
||||
|
||||
// Manifest provides the base accessible fields for working with V2 image
|
||||
// format in the registry.
|
||||
type Manifest struct {
|
||||
manifest.Versioned
|
||||
|
||||
// Name is the name of the image's repository
|
||||
Name string `json:"name"`
|
||||
|
||||
// Tag is the tag of the image specified by this manifest
|
||||
Tag string `json:"tag"`
|
||||
|
||||
// Architecture is the host architecture on which this image is intended to
|
||||
// run
|
||||
Architecture string `json:"architecture"`
|
||||
|
||||
// FSLayers is a list of filesystem layer blobSums contained in this image
|
||||
FSLayers []FSLayer `json:"fsLayers"`
|
||||
|
||||
// History is a list of unstructured historical data for v1 compatibility
|
||||
History []History `json:"history"`
|
||||
}
|
||||
|
||||
// SignedManifest provides an envelope for a signed image manifest, including
|
||||
// the format sensitive raw bytes.
|
||||
type SignedManifest struct {
|
||||
Manifest
|
||||
|
||||
// Canonical is the canonical byte representation of the ImageManifest,
|
||||
// without any attached signatures. The manifest byte
|
||||
// representation cannot change or it will have to be re-signed.
|
||||
Canonical []byte `json:"-"`
|
||||
|
||||
// all contains the byte representation of the Manifest including signatures
|
||||
// and is returned by Payload()
|
||||
all []byte
|
||||
}
|
||||
|
||||
// UnmarshalJSON populates a new SignedManifest struct from JSON data.
|
||||
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
|
||||
sm.all = make([]byte, len(b), len(b))
|
||||
// store manifest and signatures in all
|
||||
copy(sm.all, b)
|
||||
|
||||
jsig, err := libtrust.ParsePrettySignature(b, "signatures")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve the payload in the manifest.
|
||||
bytes, err := jsig.Payload()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// sm.Canonical stores the canonical manifest JSON
|
||||
sm.Canonical = make([]byte, len(bytes), len(bytes))
|
||||
copy(sm.Canonical, bytes)
|
||||
|
||||
// Unmarshal canonical JSON into Manifest object
|
||||
var manifest Manifest
|
||||
if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sm.Manifest = manifest
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// References returns the descriptors of this manifests references
|
||||
func (sm SignedManifest) References() []distribution.Descriptor {
|
||||
dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
|
||||
for i, fsLayer := range sm.FSLayers {
|
||||
dependencies[i] = distribution.Descriptor{
|
||||
MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
|
||||
Digest: fsLayer.BlobSum,
|
||||
}
|
||||
}
|
||||
|
||||
return dependencies
|
||||
|
||||
}
|
||||
|
||||
// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
|
||||
// contents. Applications requiring a marshaled signed manifest should simply
|
||||
// use Raw directly, since the content produced by json.Marshal will be
|
||||
// compacted and will fail signature checks.
|
||||
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
|
||||
if len(sm.all) > 0 {
|
||||
return sm.all, nil
|
||||
}
|
||||
|
||||
// If the raw data is not available, just dump the inner content.
|
||||
return json.Marshal(&sm.Manifest)
|
||||
}
|
||||
|
||||
// Payload returns the signed content of the signed manifest.
|
||||
func (sm SignedManifest) Payload() (string, []byte, error) {
|
||||
return MediaTypeSignedManifest, sm.all, nil
|
||||
}
|
||||
|
||||
// Signatures returns the signatures as provided by
|
||||
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
|
||||
// signatures.
|
||||
func (sm *SignedManifest) Signatures() ([][]byte, error) {
|
||||
jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Resolve the payload in the manifest.
|
||||
return jsig.Signatures()
|
||||
}
|
98 vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go (generated, vendored)
@@ -1,98 +0,0 @@
package schema1

import (
    "context"
    "errors"
    "fmt"

    "github.com/docker/distribution"
    "github.com/docker/distribution/manifest"
    "github.com/docker/distribution/reference"
    "github.com/docker/libtrust"
    "github.com/opencontainers/go-digest"
)

// referenceManifestBuilder is a type for constructing manifests from schema1
// dependencies.
type referenceManifestBuilder struct {
    Manifest
    pk libtrust.PrivateKey
}

// NewReferenceManifestBuilder is used to build new manifests for the current
// schema version using schema1 dependencies.
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder {
    tag := ""
    if tagged, isTagged := ref.(reference.Tagged); isTagged {
        tag = tagged.Tag()
    }

    return &referenceManifestBuilder{
        Manifest: Manifest{
            Versioned: manifest.Versioned{
                SchemaVersion: 1,
            },
            Name: ref.Name(),
            Tag: tag,
            Architecture: architecture,
        },
        pk: pk,
    }
}

func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) {
    m := mb.Manifest
    if len(m.FSLayers) == 0 {
        return nil, errors.New("cannot build manifest with zero layers or history")
    }

    m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers))
    m.History = make([]History, len(mb.Manifest.History))
    copy(m.FSLayers, mb.Manifest.FSLayers)
    copy(m.History, mb.Manifest.History)

    return Sign(&m, mb.pk)
}

// AppendReference adds a reference to the current ManifestBuilder
func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
    r, ok := d.(Reference)
    if !ok {
        return fmt.Errorf("Unable to add non-reference type to v1 builder")
    }

    // Entries need to be prepended
    mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
    mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
    return nil

}

// References returns the current references added to this builder
func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
    refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
    for i := range mb.Manifest.FSLayers {
        layerDigest := mb.Manifest.FSLayers[i].BlobSum
        history := mb.Manifest.History[i]
        ref := Reference{layerDigest, 0, history}
        refs[i] = ref.Descriptor()
    }
    return refs
}

// Reference describes a manifest v2, schema version 1 dependency.
// An FSLayer associated with a history entry.
type Reference struct {
    Digest digest.Digest
    Size int64 // if we know it, set it for the descriptor.
    History History
}

// Descriptor describes a reference
func (r Reference) Descriptor() distribution.Descriptor {
    return distribution.Descriptor{
        MediaType: MediaTypeManifestLayer,
        Digest: r.Digest,
        Size: r.Size,
    }
}
68 vendor/github.com/docker/distribution/manifest/schema1/sign.go (generated, vendored)
@@ -1,68 +0,0 @@
package schema1

import (
    "crypto/x509"
    "encoding/json"

    "github.com/docker/libtrust"
)

// Sign signs the manifest with the provided private key, returning a
// SignedManifest. This typically won't be used within the registry, except
// for testing.
func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
    p, err := json.MarshalIndent(m, "", " ")
    if err != nil {
        return nil, err
    }

    js, err := libtrust.NewJSONSignature(p)
    if err != nil {
        return nil, err
    }

    if err := js.Sign(pk); err != nil {
        return nil, err
    }

    pretty, err := js.PrettySignature("signatures")
    if err != nil {
        return nil, err
    }

    return &SignedManifest{
        Manifest: *m,
        all: pretty,
        Canonical: p,
    }, nil
}

// SignWithChain signs the manifest with the given private key and x509 chain.
// The public key of the first element in the chain must be the public key
// corresponding with the sign key.
func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
    p, err := json.MarshalIndent(m, "", " ")
    if err != nil {
        return nil, err
    }

    js, err := libtrust.NewJSONSignature(p)
    if err != nil {
        return nil, err
    }

    if err := js.SignWithChain(key, chain); err != nil {
        return nil, err
    }

    pretty, err := js.PrettySignature("signatures")
    if err != nil {
        return nil, err
    }

    return &SignedManifest{
        Manifest: *m,
        all: pretty,
        Canonical: p,
    }, nil
}
32 vendor/github.com/docker/distribution/manifest/schema1/verify.go (generated, vendored)
@@ -1,32 +0,0 @@
package schema1

import (
    "crypto/x509"

    "github.com/docker/libtrust"
    "github.com/sirupsen/logrus"
)

// Verify verifies the signature of the signed manifest returning the public
// keys used during signing.
func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
    js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
    if err != nil {
        logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
        return nil, err
    }

    return js.Verify()
}

// VerifyChains verifies the signature of the signed manifest against the
// certificate pool returning the list of verified chains. Signatures without
// an x509 chain are not checked.
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
    js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
    if err != nil {
        return nil, err
    }

    return js.VerifyChains(ca)
}
85 vendor/github.com/docker/distribution/manifest/schema2/builder.go (generated, vendored)
@@ -1,85 +0,0 @@
package schema2

import (
    "context"

    "github.com/docker/distribution"
    "github.com/opencontainers/go-digest"
)

// builder is a type for constructing manifests.
type builder struct {
    // bs is a BlobService used to publish the configuration blob.
    bs distribution.BlobService

    // configMediaType is media type used to describe configuration
    configMediaType string

    // configJSON references
    configJSON []byte

    // dependencies is a list of descriptors that gets built by successive
    // calls to AppendReference. In case of image configuration these are layers.
    dependencies []distribution.Descriptor
}

// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process.
func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder {
    mb := &builder{
        bs: bs,
        configMediaType: configMediaType,
        configJSON: make([]byte, len(configJSON)),
    }
    copy(mb.configJSON, configJSON)

    return mb
}

// Build produces a final manifest from the given references.
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
    m := Manifest{
        Versioned: SchemaVersion,
        Layers: make([]distribution.Descriptor, len(mb.dependencies)),
    }
    copy(m.Layers, mb.dependencies)

    configDigest := digest.FromBytes(mb.configJSON)

    var err error
    m.Config, err = mb.bs.Stat(ctx, configDigest)
    switch err {
    case nil:
        // Override MediaType, since Put always replaces the specified media
        // type with application/octet-stream in the descriptor it returns.
        m.Config.MediaType = mb.configMediaType
        return FromStruct(m)
    case distribution.ErrBlobUnknown:
        // nop
    default:
        return nil, err
    }

    // Add config to the blob store
    m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON)
    // Override MediaType, since Put always replaces the specified media
    // type with application/octet-stream in the descriptor it returns.
    m.Config.MediaType = mb.configMediaType
    if err != nil {
        return nil, err
    }

    return FromStruct(m)
}

// AppendReference adds a reference to the current ManifestBuilder.
func (mb *builder) AppendReference(d distribution.Describable) error {
    mb.dependencies = append(mb.dependencies, d.Descriptor())
    return nil
}

// References returns the current references added to this builder.
func (mb *builder) References() []distribution.Descriptor {
    return mb.dependencies
}
144 vendor/github.com/docker/distribution/manifest/schema2/manifest.go (generated, vendored)
@@ -1,144 +0,0 @@
package schema2

import (
    "encoding/json"
    "errors"
    "fmt"

    "github.com/docker/distribution"
    "github.com/docker/distribution/manifest"
    "github.com/opencontainers/go-digest"
)

const (
    // MediaTypeManifest specifies the mediaType for the current version.
    MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"

    // MediaTypeImageConfig specifies the mediaType for the image configuration.
    MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"

    // MediaTypePluginConfig specifies the mediaType for plugin configuration.
    MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"

    // MediaTypeLayer is the mediaType used for layers referenced by the
    // manifest.
    MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"

    // MediaTypeForeignLayer is the mediaType used for layers that must be
    // downloaded from foreign URLs.
    MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"

    // MediaTypeUncompressedLayer is the mediaType used for layers which
    // are not compressed.
    MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
)

var (
    // SchemaVersion provides a pre-initialized version structure for this
    // package's version of the manifest.
    SchemaVersion = manifest.Versioned{
        SchemaVersion: 2,
        MediaType: MediaTypeManifest,
    }
)

func init() {
    schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
        m := new(DeserializedManifest)
        err := m.UnmarshalJSON(b)
        if err != nil {
            return nil, distribution.Descriptor{}, err
        }

        dgst := digest.FromBytes(b)
        return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
    }
    err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
    if err != nil {
        panic(fmt.Sprintf("Unable to register manifest: %s", err))
    }
}

// Manifest defines a schema2 manifest.
type Manifest struct {
    manifest.Versioned

    // Config references the image configuration as a blob.
    Config distribution.Descriptor `json:"config"`

    // Layers lists descriptors for the layers referenced by the
    // configuration.
    Layers []distribution.Descriptor `json:"layers"`
}

// References returns the descriptors of this manifests references.
func (m Manifest) References() []distribution.Descriptor {
    references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
    references = append(references, m.Config)
    references = append(references, m.Layers...)
    return references
}

// Target returns the target of this manifest.
func (m Manifest) Target() distribution.Descriptor {
    return m.Config
}

// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
    Manifest

    // canonical is the canonical byte representation of the Manifest.
    canonical []byte
}

// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
    var deserialized DeserializedManifest
    deserialized.Manifest = m

    var err error
    deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
    return &deserialized, err
}

// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
    m.canonical = make([]byte, len(b), len(b))
    // store manifest in canonical
    copy(m.canonical, b)

    // Unmarshal canonical JSON into Manifest object
    var manifest Manifest
    if err := json.Unmarshal(m.canonical, &manifest); err != nil {
        return err
    }

    if manifest.MediaType != MediaTypeManifest {
        return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
            MediaTypeManifest, manifest.MediaType)

    }

    m.Manifest = manifest

    return nil
}

// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
    if len(m.canonical) > 0 {
        return m.canonical, nil
    }

    return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}

// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
    return m.MediaType, m.canonical, nil
}
12 vendor/github.com/docker/distribution/manifest/versioned.go (generated, vendored)
@@ -1,12 +0,0 @@
package manifest

// Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version.
type Versioned struct {
    // SchemaVersion is the image manifest schema that this image follows
    SchemaVersion int `json:"schemaVersion"`

    // MediaType is the media type of this schema.
    MediaType string `json:"mediaType,omitempty"`
}
190 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go (generated, vendored)
@@ -1,190 +0,0 @@
|
||||
package stdcopy // import "github.com/docker/docker/pkg/stdcopy"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// StdType is the type of standard stream
|
||||
// a writer can multiplex to.
|
||||
type StdType byte
|
||||
|
||||
const (
|
||||
// Stdin represents standard input stream type.
|
||||
Stdin StdType = iota
|
||||
// Stdout represents standard output stream type.
|
||||
Stdout
|
||||
// Stderr represents standard error stream type.
|
||||
Stderr
|
||||
// Systemerr represents errors originating from the system that make it
|
||||
// into the multiplexed stream.
|
||||
Systemerr
|
||||
|
||||
stdWriterPrefixLen = 8
|
||||
stdWriterFdIndex = 0
|
||||
stdWriterSizeIndex = 4
|
||||
|
||||
startingBufLen = 32*1024 + stdWriterPrefixLen + 1
|
||||
)
|
||||
|
||||
var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
|
||||
|
||||
// stdWriter is wrapper of io.Writer with extra customized info.
|
||||
type stdWriter struct {
|
||||
io.Writer
|
||||
prefix byte
|
||||
}
|
||||
|
||||
// Write sends the buffer to the underneath writer.
|
||||
// It inserts the prefix header before the buffer,
|
||||
// so stdcopy.StdCopy knows where to multiplex the output.
|
||||
// It makes stdWriter to implement io.Writer.
|
||||
func (w *stdWriter) Write(p []byte) (n int, err error) {
|
||||
if w == nil || w.Writer == nil {
|
||||
return 0, errors.New("Writer not instantiated")
|
||||
}
|
||||
if p == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
|
||||
binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Write(header[:])
|
||||
buf.Write(p)
|
||||
|
||||
n, err = w.Writer.Write(buf.Bytes())
|
||||
n -= stdWriterPrefixLen
|
||||
if n < 0 {
|
||||
n = 0
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
bufPool.Put(buf)
|
||||
return
|
||||
}
|
||||
|
||||
// NewStdWriter instantiates a new Writer.
|
||||
// Everything written to it will be encapsulated using a custom format,
|
||||
// and written to the underlying `w` stream.
|
||||
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
|
||||
// `t` indicates the id of the stream to encapsulate.
|
||||
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
|
||||
func NewStdWriter(w io.Writer, t StdType) io.Writer {
|
||||
return &stdWriter{
|
||||
Writer: w,
|
||||
prefix: byte(t),
|
||||
}
|
||||
}
|
||||
|
||||
// StdCopy is a modified version of io.Copy.
|
||||
//
|
||||
// StdCopy will demultiplex `src`, assuming that it contains two streams,
|
||||
// previously multiplexed together using a StdWriter instance.
|
||||
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
|
||||
//
|
||||
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
|
||||
// In other words: if `err` is non nil, it indicates a real underlying error.
|
||||
//
|
||||
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
|
||||
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
|
||||
var (
|
||||
buf = make([]byte, startingBufLen)
|
||||
bufLen = len(buf)
|
||||
nr, nw int
|
||||
er, ew error
|
||||
out io.Writer
|
||||
frameSize int
|
||||
)
|
||||
|
||||
for {
|
||||
// Make sure we have at least a full header
|
||||
for nr < stdWriterPrefixLen {
|
||||
var nr2 int
|
||||
nr2, er = src.Read(buf[nr:])
|
||||
nr += nr2
|
||||
if er == io.EOF {
|
||||
if nr < stdWriterPrefixLen {
|
||||
return written, nil
|
||||
}
|
||||
break
|
||||
}
|
||||
if er != nil {
|
||||
return 0, er
|
||||
}
|
||||
}
|
||||
|
||||
stream := StdType(buf[stdWriterFdIndex])
|
||||
// Check the first byte to know where to write
|
||||
switch stream {
|
||||
case Stdin:
|
||||
fallthrough
|
||||
case Stdout:
|
||||
// Write on stdout
|
||||
out = dstout
|
||||
case Stderr:
|
||||
// Write on stderr
|
||||
out = dsterr
|
||||
case Systemerr:
|
||||
// If we're on Systemerr, we won't write anywhere.
|
||||
// NB: if this code changes later, make sure you don't try to write
|
||||
// to outstream if Systemerr is the stream
|
||||
out = nil
|
||||
default:
|
||||
return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
|
||||
}
|
||||
|
||||
// Retrieve the size of the frame
|
||||
frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
|
||||
|
||||
// Check if the buffer is big enough to read the frame.
|
||||
// Extend it if necessary.
|
||||
if frameSize+stdWriterPrefixLen > bufLen {
|
||||
buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
|
||||
bufLen = len(buf)
|
||||
}
|
||||
|
||||
// While the amount of bytes read is less than the size of the frame + header, we keep reading
|
||||
for nr < frameSize+stdWriterPrefixLen {
|
||||
var nr2 int
|
||||
nr2, er = src.Read(buf[nr:])
|
||||
nr += nr2
|
||||
if er == io.EOF {
|
||||
if nr < frameSize+stdWriterPrefixLen {
|
||||
return written, nil
|
||||
}
|
||||
break
|
||||
}
|
||||
if er != nil {
|
||||
return 0, er
|
||||
}
|
||||
}
|
||||
|
||||
// we might have an error from the source mixed up in our multiplexed
|
||||
// stream. if we do, return it.
|
||||
if stream == Systemerr {
|
||||
return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen]))
|
||||
}
|
||||
|
||||
// Write the retrieved frame (without header)
|
||||
nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
|
||||
if ew != nil {
|
||||
return 0, ew
|
||||
}
|
||||
|
||||
// If the frame has not been fully written: error
|
||||
if nw != frameSize {
|
||||
return 0, io.ErrShortWrite
|
||||
}
|
||||
written += int64(nw)
|
||||
|
||||
// Move the rest of the buffer to the beginning
|
||||
copy(buf, buf[frameSize+stdWriterPrefixLen:])
|
||||
// Move the index
|
||||
nr -= frameSize + stdWriterPrefixLen
|
||||
}
|
||||
}
|
13 vendor/github.com/docker/libtrust/CONTRIBUTING.md (generated, vendored)
@@ -1,13 +0,0 @@
# Contributing to libtrust

Want to hack on libtrust? Awesome! Here are instructions to get you
started.

libtrust is a part of the [Docker](https://www.docker.com) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.

Otherwise, go read
[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).

Happy hacking!
191 vendor/github.com/docker/libtrust/LICENSE (generated, vendored)
@@ -1,191 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2014 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
3 vendor/github.com/docker/libtrust/MAINTAINERS (generated, vendored)
@@ -1,3 +0,0 @@
Solomon Hykes <solomon@docker.com>
Josh Hawn <josh@docker.com> (github: jlhawn)
Derek McGowan <derek@docker.com> (github: dmcgowan)
22 vendor/github.com/docker/libtrust/README.md (generated, vendored)
@@ -1,22 +0,0 @@
# libtrust

> **WARNING** this library is no longer actively developed, and will be integrated
> in the [docker/distribution](https://www.github.com/docker/distribution)
> repository in the future.

Libtrust is a library for managing authentication and authorization using public key cryptography.

Authentication is handled using the identity attached to the public key.
Libtrust provides multiple methods to prove possession of the private key associated with an identity.
 - TLS x509 certificates
 - Signature verification
 - Key Challenge

Authorization and access control is managed through a distributed trust graph.
Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.

## Copyright and license

Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license.
Docs released under Creative commons.
175 vendor/github.com/docker/libtrust/certificates.go (generated, vendored)
@@ -1,175 +0,0 @@
|
||||
package libtrust
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type certTemplateInfo struct {
|
||||
commonName string
|
||||
domains []string
|
||||
ipAddresses []net.IP
|
||||
isCA bool
|
||||
clientAuth bool
|
||||
serverAuth bool
|
||||
}
|
||||
|
||||
func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
|
||||
// Generate a certificate template which is valid from the past week to
|
||||
// 10 years from now. The usage of the certificate depends on the
|
||||
// specified fields in the given certTempInfo object.
|
||||
var (
|
||||
keyUsage x509.KeyUsage
|
||||
extKeyUsage []x509.ExtKeyUsage
|
||||
)
|
||||
|
||||
if info.isCA {
|
||||
keyUsage = x509.KeyUsageCertSign
|
||||
}
|
||||
|
||||
if info.clientAuth {
|
||||
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
|
||||
}
|
||||
|
||||
if info.serverAuth {
|
||||
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
|
||||
}
|
||||
|
||||
return &x509.Certificate{
|
||||
SerialNumber: big.NewInt(0),
|
||||
Subject: pkix.Name{
|
||||
CommonName: info.commonName,
|
||||
},
|
||||
NotBefore: time.Now().Add(-time.Hour * 24 * 7),
|
||||
NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
|
||||
DNSNames: info.domains,
|
||||
IPAddresses: info.ipAddresses,
|
||||
IsCA: info.isCA,
|
||||
KeyUsage: keyUsage,
|
||||
ExtKeyUsage: extKeyUsage,
|
||||
BasicConstraintsValid: info.isCA,
|
||||
}
|
||||
}
|
||||
|
||||
func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
|
||||
pubCertTemplate := generateCertTemplate(subInfo)
|
||||
privCertTemplate := generateCertTemplate(issInfo)
|
||||
|
||||
certDER, err := x509.CreateCertificate(
|
||||
rand.Reader, pubCertTemplate, privCertTemplate,
|
||||
pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create certificate: %s", err)
|
||||
}
|
||||
|
||||
cert, err = x509.ParseCertificate(certDER)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse certificate: %s", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GenerateSelfSignedServerCert creates a self-signed certificate for the
|
||||
// given key which is to be used for TLS servers with the given domains and
|
||||
// IP addresses.
|
||||
func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
|
||||
info := &certTemplateInfo{
|
||||
commonName: key.KeyID(),
|
||||
domains: domains,
|
||||
ipAddresses: ipAddresses,
|
||||
serverAuth: true,
|
||||
}
|
||||
|
||||
return generateCert(key.PublicKey(), key, info, info)
|
||||
}
|
||||
|
||||
// GenerateSelfSignedClientCert creates a self-signed certificate for the
|
||||
// given key which is to be used for TLS clients.
|
||||
func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
|
||||
info := &certTemplateInfo{
|
||||
commonName: key.KeyID(),
|
||||
clientAuth: true,
|
||||
}
|
||||
|
||||
return generateCert(key.PublicKey(), key, info, info)
|
||||
}
|
||||
|
||||
// GenerateCACert creates a certificate which can be used as a trusted
|
||||
// certificate authority.
|
||||
func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
|
||||
subjectInfo := &certTemplateInfo{
|
||||
commonName: trustedKey.KeyID(),
|
||||
isCA: true,
|
||||
}
|
||||
issuerInfo := &certTemplateInfo{
|
||||
commonName: signer.KeyID(),
|
||||
}
|
||||
|
||||
return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
|
||||
}
|
||||
|
||||
// GenerateCACertPool creates a certificate authority pool to be used for a
|
||||
// TLS configuration. Any self-signed certificates issued by the specified
|
||||
// trusted keys will be verified during a TLS handshake
|
||||
func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
|
||||
certPool := x509.NewCertPool()
|
||||
|
||||
for _, trustedKey := range trustedKeys {
|
||||
cert, err := GenerateCACert(signer, trustedKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
|
||||
}
|
||||
|
||||
certPool.AddCert(cert)
|
||||
}
|
||||
|
||||
return certPool, nil
|
||||
}
|
||||
|
||||
// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded
|
||||
// containing one or more certificates. The expected pem type is "CERTIFICATE".
|
||||
func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certificates := []*x509.Certificate{}
|
||||
var block *pem.Block
|
||||
block, b = pem.Decode(b)
|
||||
for ; block != nil; block, b = pem.Decode(b) {
|
||||
if block.Type == "CERTIFICATE" {
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certificates = append(certificates, cert)
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
|
||||
}
|
||||
}
|
||||
|
||||
return certificates, nil
|
||||
}
|
||||
|
||||
// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded
|
||||
// containing one or more certificates. The expected pem type is "CERTIFICATE".
|
||||
func LoadCertificatePool(filename string) (*x509.CertPool, error) {
|
||||
certs, err := LoadCertificateBundle(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
for _, cert := range certs {
|
||||
pool.AddCert(cert)
|
||||
}
|
||||
return pool, nil
|
||||
}
|
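Taken together, the helpers in certificates.go cover the usual flow: generate a key, mint a self-signed server certificate for it, and build a CA pool from the keys you trust. What follows is a minimal sketch of that flow, not part of the vendored code; the domain, IP address, and mutual-TLS config are illustrative placeholders, and the package is assumed to be importable from its canonical path github.com/docker/libtrust.

package main

import (
	"crypto/tls"
	"encoding/pem"
	"net"
	"os"

	"github.com/docker/libtrust"
)

func main() {
	// Generate a fresh EC P-256 key for the server (see ec_key.go below).
	serverKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	// Self-signed certificate covering the given domain and IP.
	cert, err := libtrust.GenerateSelfSignedServerCert(
		serverKey,
		[]string{"registry.example.com"},
		[]net.IP{net.ParseIP("127.0.0.1")},
	)
	if err != nil {
		panic(err)
	}
	// Emit the certificate as PEM so it can be handed to clients.
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})

	// A CA pool that accepts self-signed certs issued for a set of trusted
	// keys, usable for client verification in a TLS config.
	clientKey, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	pool, err := libtrust.GenerateCACertPool(serverKey, []libtrust.PublicKey{clientKey.PublicKey()})
	if err != nil {
		panic(err)
	}
	_ = &tls.Config{ClientCAs: pool, ClientAuth: tls.RequireAndVerifyClientCert}
}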
9 vendor/github.com/docker/libtrust/doc.go (generated, vendored)
@@ -1,9 +0,0 @@
/*
Package libtrust provides an interface for managing authentication and
authorization using public key cryptography. Authentication is handled
using the identity attached to the public key and verified through TLS
x509 certificates, a key challenge, or signature. Authorization and
access control is managed through a trust graph distributed between
both remote trust servers and locally cached and managed data.
*/
package libtrust
428 vendor/github.com/docker/libtrust/ec_key.go (generated, vendored)
@@ -1,428 +0,0 @@
|
||||
package libtrust
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
/*
|
||||
* EC DSA PUBLIC KEY
|
||||
*/
|
||||
|
||||
// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
|
||||
// signature algorithms.
|
||||
type ecPublicKey struct {
|
||||
*ecdsa.PublicKey
|
||||
curveName string
|
||||
signatureAlgorithm *signatureAlgorithm
|
||||
extended map[string]interface{}
|
||||
}
|
||||
|
||||
func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
|
||||
curve := cryptoPublicKey.Curve
|
||||
|
||||
switch {
|
||||
case curve == elliptic.P256():
|
||||
return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
|
||||
case curve == elliptic.P384():
|
||||
return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
|
||||
case curve == elliptic.P521():
|
||||
return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
|
||||
default:
|
||||
return nil, errors.New("unsupported elliptic curve")
|
||||
}
|
||||
}
|
||||
|
||||
// KeyType returns the key type for elliptic curve keys, i.e., "EC".
|
||||
func (k *ecPublicKey) KeyType() string {
|
||||
return "EC"
|
||||
}
|
||||
|
||||
// CurveName returns the elliptic curve identifier.
|
||||
// Possible values are "P-256", "P-384", and "P-521".
|
||||
func (k *ecPublicKey) CurveName() string {
|
||||
return k.curveName
|
||||
}
|
||||
|
||||
// KeyID returns a distinct identifier which is unique to this Public Key.
|
||||
func (k *ecPublicKey) KeyID() string {
|
||||
return keyIDFromCryptoKey(k)
|
||||
}
|
||||
|
||||
func (k *ecPublicKey) String() string {
|
||||
return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
|
||||
}
|
||||
|
||||
// Verify verifies the signature of the data in the io.Reader using this
|
||||
// PublicKey. The alg parameter should identify the digital signature
|
||||
// algorithm which was used to produce the signature and should be supported
|
||||
// by this public key. Returns a nil error if the signature is valid.
|
||||
func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
|
||||
// For EC keys there is only one supported signature algorithm depending
|
||||
// on the curve parameters.
|
||||
if k.signatureAlgorithm.HeaderParam() != alg {
|
||||
return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
|
||||
}
|
||||
|
||||
// signature is the concatenation of (r, s), base64Url encoded.
|
||||
sigLength := len(signature)
|
||||
expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
|
||||
if sigLength != expectedOctetLength {
|
||||
return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
|
||||
}
|
||||
|
||||
rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
|
||||
r := new(big.Int).SetBytes(rBytes)
|
||||
s := new(big.Int).SetBytes(sBytes)
|
||||
|
||||
hasher := k.signatureAlgorithm.HashID().New()
|
||||
_, err := io.Copy(hasher, data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading data to sign: %s", err)
|
||||
}
|
||||
hash := hasher.Sum(nil)
|
||||
|
||||
if !ecdsa.Verify(k.PublicKey, hash, r, s) {
|
||||
return errors.New("invalid signature")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CryptoPublicKey returns the internal object which can be used as a
|
||||
// crypto.PublicKey for use with other standard library operations. The type
|
||||
// is either *rsa.PublicKey or *ecdsa.PublicKey
|
||||
func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
|
||||
return k.PublicKey
|
||||
}
|
||||
|
||||
func (k *ecPublicKey) toMap() map[string]interface{} {
|
||||
jwk := make(map[string]interface{})
|
||||
for k, v := range k.extended {
|
||||
jwk[k] = v
|
||||
}
|
||||
jwk["kty"] = k.KeyType()
|
||||
jwk["kid"] = k.KeyID()
|
||||
jwk["crv"] = k.CurveName()
|
||||
|
||||
xBytes := k.X.Bytes()
|
||||
yBytes := k.Y.Bytes()
|
||||
octetLength := (k.Params().BitSize + 7) >> 3
|
||||
// MUST include leading zeros in the output so that x, y are each
|
||||
// *octetLength* bytes long.
|
||||
xBuf := make([]byte, octetLength-len(xBytes), octetLength)
|
||||
yBuf := make([]byte, octetLength-len(yBytes), octetLength)
|
||||
xBuf = append(xBuf, xBytes...)
|
||||
yBuf = append(yBuf, yBytes...)
|
||||
|
||||
jwk["x"] = joseBase64UrlEncode(xBuf)
|
||||
jwk["y"] = joseBase64UrlEncode(yBuf)
|
||||
|
||||
return jwk
|
||||
}
|
||||
|
||||
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
|
||||
// elliptic curve keys.
|
||||
func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
|
||||
return json.Marshal(k.toMap())
|
||||
}
|
||||
|
||||
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
|
||||
func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
|
||||
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
|
||||
}
|
||||
k.extended["kid"] = k.KeyID() // For display purposes.
|
||||
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
|
||||
}
|
||||
|
||||
func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
|
||||
k.extended[field] = value
|
||||
}
|
||||
|
||||
func (k *ecPublicKey) GetExtendedField(field string) interface{} {
|
||||
v, ok := k.extended[field]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
|
||||
// JWK key type (kty) has already been determined to be "EC".
|
||||
// Need to extract 'crv', 'x', 'y', and 'kid' and check for
|
||||
// consistency.
|
||||
|
||||
// Get the curve identifier value.
|
||||
crv, err := stringFromMap(jwk, "crv")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
|
||||
}
|
||||
|
||||
var (
|
||||
curve elliptic.Curve
|
||||
sigAlg *signatureAlgorithm
|
||||
)
|
||||
|
||||
switch {
|
||||
case crv == "P-256":
|
||||
curve = elliptic.P256()
|
||||
sigAlg = es256
|
||||
case crv == "P-384":
|
||||
curve = elliptic.P384()
|
||||
sigAlg = es384
|
||||
case crv == "P-521":
|
||||
curve = elliptic.P521()
|
||||
sigAlg = es512
|
||||
default:
|
||||
return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv)
|
||||
}
|
||||
|
||||
// Get the X and Y coordinates for the public key point.
|
||||
xB64Url, err := stringFromMap(jwk, "x")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
|
||||
}
|
||||
x, err := parseECCoordinate(xB64Url, curve)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
|
||||
}
|
||||
|
||||
yB64Url, err := stringFromMap(jwk, "y")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
|
||||
}
|
||||
y, err := parseECCoordinate(yB64Url, curve)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
|
||||
}
|
||||
|
||||
key := &ecPublicKey{
|
||||
PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
|
||||
curveName: crv, signatureAlgorithm: sigAlg,
|
||||
}
|
||||
|
||||
// Key ID is optional too, but if it exists, it should match the key.
|
||||
_, ok := jwk["kid"]
|
||||
if ok {
|
||||
kid, err := stringFromMap(jwk, "kid")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
|
||||
}
|
||||
if kid != key.KeyID() {
|
||||
return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
|
||||
}
|
||||
}
|
||||
|
||||
key.extended = jwk
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
/*
|
||||
* EC DSA PRIVATE KEY
|
||||
*/
|
||||
|
||||
// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
|
||||
// algorithms.
|
||||
type ecPrivateKey struct {
|
||||
ecPublicKey
|
||||
*ecdsa.PrivateKey
|
||||
}
|
||||
|
||||
func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
|
||||
publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
|
||||
}
|
||||
|
||||
// PublicKey returns the Public Key data associated with this Private Key.
|
||||
func (k *ecPrivateKey) PublicKey() PublicKey {
|
||||
return &k.ecPublicKey
|
||||
}
|
||||
|
||||
func (k *ecPrivateKey) String() string {
|
||||
return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
|
||||
}
|
||||
|
||||
// Sign signs the data read from the io.Reader using a signature algorithm supported
|
||||
// by the elliptic curve private key. If the specified hashing algorithm is
|
||||
// supported by this key, that hash function is used to generate the signature
|
||||
// otherwise the default hashing algorithm for this key is used. Returns
|
||||
// the signature and the name of the JWK signature algorithm used, e.g.,
|
||||
// "ES256", "ES384", "ES512".
|
||||
func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
|
||||
// Generate a signature of the data using the internal alg.
|
||||
// The given hashId is only a suggestion, and since EC keys only support
|
||||
// on signature/hash algorithm given the curve name, we disregard it for
|
||||
// the elliptic curve JWK signature implementation.
|
||||
hasher := k.signatureAlgorithm.HashID().New()
|
||||
_, err = io.Copy(hasher, data)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error reading data to sign: %s", err)
|
||||
}
|
||||
hash := hasher.Sum(nil)
|
||||
|
||||
r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
|
||||
if err != nil {
|
||||
return nil, "", fmt.Errorf("error producing signature: %s", err)
|
||||
}
|
||||
rBytes, sBytes := r.Bytes(), s.Bytes()
|
||||
octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
|
||||
// MUST include leading zeros in the output
|
||||
rBuf := make([]byte, octetLength-len(rBytes), octetLength)
|
||||
sBuf := make([]byte, octetLength-len(sBytes), octetLength)
|
||||
|
||||
rBuf = append(rBuf, rBytes...)
|
||||
sBuf = append(sBuf, sBytes...)
|
||||
|
||||
signature = append(rBuf, sBuf...)
|
||||
alg = k.signatureAlgorithm.HeaderParam()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CryptoPrivateKey returns the internal object which can be used as a
|
||||
// crypto.PrivateKey for use with other standard library operations. The type
// is either *rsa.PrivateKey or *ecdsa.PrivateKey.
|
||||
func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
|
||||
return k.PrivateKey
|
||||
}
|
||||
|
||||
func (k *ecPrivateKey) toMap() map[string]interface{} {
|
||||
jwk := k.ecPublicKey.toMap()
|
||||
|
||||
dBytes := k.D.Bytes()
|
||||
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
|
||||
// octets (where n is the order of the curve). This is because the private
|
||||
// key d must be in the interval [1, n-1] so the bitlength of d should be
|
||||
// no larger than the bitlength of n-1. The easiest way to find the octet
|
||||
// length is to take bitlength(n-1), add 7 to force a carry, and shift this
|
||||
// bit sequence right by 3, which is essentially dividing by 8 and adding
|
||||
// 1 if there is any remainder. Thus, the private key value d should be
|
||||
// output to (bitlength(n-1)+7)>>3 octets.
|
||||
n := k.ecPublicKey.Params().N
|
||||
octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
|
||||
// Create a buffer with the necessary zero-padding.
|
||||
dBuf := make([]byte, octetLength-len(dBytes), octetLength)
|
||||
dBuf = append(dBuf, dBytes...)
|
||||
|
||||
jwk["d"] = joseBase64UrlEncode(dBuf)
|
||||
|
||||
return jwk
|
||||
}
|
||||
|
||||
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
|
||||
// elliptic curve keys.
|
||||
func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
|
||||
return json.Marshal(k.toMap())
|
||||
}
|
||||
|
||||
// PEMBlock serializes this Private Key to DER-encoded SEC 1 format.
|
||||
func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
|
||||
derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
|
||||
}
|
||||
k.extended["keyID"] = k.KeyID() // For display purposes.
|
||||
return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
|
||||
}
|
||||
|
||||
func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
|
||||
dB64Url, err := stringFromMap(jwk, "d")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK EC Private Key: %s", err)
|
||||
}
|
||||
|
||||
// JWK key type (kty) has already been determined to be "EC".
|
||||
// Need to extract the public key information, then extract the private
|
||||
// key value 'd'.
|
||||
publicKey, err := ecPublicKeyFromMap(jwk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
|
||||
}
|
||||
|
||||
key := &ecPrivateKey{
|
||||
ecPublicKey: *publicKey,
|
||||
PrivateKey: &ecdsa.PrivateKey{
|
||||
PublicKey: *publicKey.PublicKey,
|
||||
D: d,
|
||||
},
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
/*
|
||||
* Key Generation Functions.
|
||||
*/
|
||||
|
||||
func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
|
||||
k = new(ecPrivateKey)
|
||||
k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
|
||||
k.extended = make(map[string]interface{})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
|
||||
func GenerateECP256PrivateKey() (PrivateKey, error) {
|
||||
k, err := generateECPrivateKey(elliptic.P256())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
|
||||
}
|
||||
|
||||
k.curveName = "P-256"
|
||||
k.signatureAlgorithm = es256
|
||||
|
||||
return k, nil
|
||||
}
|
||||
|
||||
// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
|
||||
func GenerateECP384PrivateKey() (PrivateKey, error) {
|
||||
k, err := generateECPrivateKey(elliptic.P384())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
|
||||
}
|
||||
|
||||
k.curveName = "P-384"
|
||||
k.signatureAlgorithm = es384
|
||||
|
||||
return k, nil
|
||||
}
|
||||
|
||||
// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
|
||||
func GenerateECP521PrivateKey() (PrivateKey, error) {
|
||||
k, err := generateECPrivateKey(elliptic.P521())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
|
||||
}
|
||||
|
||||
k.curveName = "P-521"
|
||||
k.signatureAlgorithm = es512
|
||||
|
||||
return k, nil
|
||||
}
|
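Before the next file, a short usage sketch for the EC key type above (illustrative, not part of the vendored source): generate a key, Sign a stream, then Verify it through the public half. The import path github.com/docker/libtrust is assumed.

package main

import (
	"bytes"
	"crypto"
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP384PrivateKey()
	if err != nil {
		panic(err)
	}

	payload := []byte("manifest bytes to protect")

	// For EC keys the hash is fixed by the curve (SHA-384 for P-384),
	// so the crypto.SHA256 hint below is effectively ignored.
	sig, alg, err := key.Sign(bytes.NewReader(payload), crypto.SHA256)
	if err != nil {
		panic(err)
	}
	fmt.Println("signed with", alg, "key", key.KeyID())

	// Verification needs only the public half and the algorithm name.
	if err := key.PublicKey().Verify(bytes.NewReader(payload), alg, sig); err != nil {
		panic(err)
	}
	fmt.Println("signature verified")
}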
50 vendor/github.com/docker/libtrust/filter.go (generated, vendored)
@@ -1,50 +0,0 @@
package libtrust

import (
	"path/filepath"
)

// FilterByHosts filters the list of PublicKeys to only those which contain a
// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
// then keys which do not specify any hosts are also returned.
func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
	filtered := make([]PublicKey, 0, len(keys))

	for _, pubKey := range keys {
		var hosts []string
		switch v := pubKey.GetExtendedField("hosts").(type) {
		case []string:
			hosts = v
		case []interface{}:
			for _, value := range v {
				h, ok := value.(string)
				if !ok {
					continue
				}
				hosts = append(hosts, h)
			}
		}

		if len(hosts) == 0 {
			if includeEmpty {
				filtered = append(filtered, pubKey)
			}
			continue
		}

		// Check if any hosts match pattern
		for _, hostPattern := range hosts {
			match, err := filepath.Match(hostPattern, host)
			if err != nil {
				return nil, err
			}

			if match {
				filtered = append(filtered, pubKey)
				continue
			}
		}
	}

	return filtered, nil
}
|
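A brief sketch of how FilterByHosts is typically driven (illustrative only, same import assumption): the caller attaches a "hosts" extended field to a key, and patterns are matched with filepath.Match, so globs such as *.example.com work.

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	k1, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	k2, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	// k1 is trusted only for hosts under example.com; k2 carries no host list.
	pub1 := k1.PublicKey()
	pub1.AddExtendedField("hosts", []string{"*.example.com"})
	pub2 := k2.PublicKey()

	keys := []libtrust.PublicKey{pub1, pub2}

	// Only k1 matches; k2 is excluded because includeEmpty is false.
	matched, err := libtrust.FilterByHosts(keys, "registry.example.com", false)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(matched)) // 1
}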
56 vendor/github.com/docker/libtrust/hash.go (generated, vendored)
@@ -1,56 +0,0 @@
package libtrust

import (
	"crypto"
	_ "crypto/sha256" // Register SHA224 and SHA256
	_ "crypto/sha512" // Register SHA384 and SHA512
	"fmt"
)

type signatureAlgorithm struct {
	algHeaderParam string
	hashID         crypto.Hash
}

func (h *signatureAlgorithm) HeaderParam() string {
	return h.algHeaderParam
}

func (h *signatureAlgorithm) HashID() crypto.Hash {
	return h.hashID
}

var (
	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
)

func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
	switch {
	case alg == "RS256":
		return rs256, nil
	case alg == "RS384":
		return rs384, nil
	case alg == "RS512":
		return rs512, nil
	default:
		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
	}
}

func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
	switch {
	case hashID == crypto.SHA512:
		return rs512
	case hashID == crypto.SHA384:
		return rs384
	case hashID == crypto.SHA256:
		fallthrough
	default:
		return rs256
	}
}
|
657 vendor/github.com/docker/libtrust/jsonsign.go (generated, vendored)
@@ -1,657 +0,0 @@
|
||||
package libtrust
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrInvalidSignContent is used when the content to be signed is invalid.
|
||||
ErrInvalidSignContent = errors.New("invalid sign content")
|
||||
|
||||
// ErrInvalidJSONContent is used when invalid json is encountered.
|
||||
ErrInvalidJSONContent = errors.New("invalid json content")
|
||||
|
||||
// ErrMissingSignatureKey is used when the specified signature key
|
||||
// does not exist in the JSON content.
|
||||
ErrMissingSignatureKey = errors.New("missing signature key")
|
||||
)
|
||||
|
||||
type jsHeader struct {
|
||||
JWK PublicKey `json:"jwk,omitempty"`
|
||||
Algorithm string `json:"alg"`
|
||||
Chain []string `json:"x5c,omitempty"`
|
||||
}
|
||||
|
||||
type jsSignature struct {
|
||||
Header jsHeader `json:"header"`
|
||||
Signature string `json:"signature"`
|
||||
Protected string `json:"protected,omitempty"`
|
||||
}
|
||||
|
||||
type jsSignaturesSorted []jsSignature
|
||||
|
||||
func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
|
||||
func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) }
|
||||
|
||||
func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
|
||||
ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
|
||||
si, sj := jsbkid[i].Signature, jsbkid[j].Signature
|
||||
|
||||
if ki == kj {
|
||||
return si < sj
|
||||
}
|
||||
|
||||
return ki < kj
|
||||
}
|
||||
|
||||
type signKey struct {
|
||||
PrivateKey
|
||||
Chain []*x509.Certificate
|
||||
}
|
||||
|
||||
// JSONSignature represents a signature of a json object.
|
||||
type JSONSignature struct {
|
||||
payload string
|
||||
signatures []jsSignature
|
||||
indent string
|
||||
formatLength int
|
||||
formatTail []byte
|
||||
}
|
||||
|
||||
func newJSONSignature() *JSONSignature {
|
||||
return &JSONSignature{
|
||||
signatures: make([]jsSignature, 0, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// Payload returns the encoded payload of the signature. This
|
||||
// payload should not be signed directly
|
||||
func (js *JSONSignature) Payload() ([]byte, error) {
|
||||
return joseBase64UrlDecode(js.payload)
|
||||
}
|
||||
|
||||
func (js *JSONSignature) protectedHeader() (string, error) {
|
||||
protected := map[string]interface{}{
|
||||
"formatLength": js.formatLength,
|
||||
"formatTail": joseBase64UrlEncode(js.formatTail),
|
||||
"time": time.Now().UTC().Format(time.RFC3339),
|
||||
}
|
||||
protectedBytes, err := json.Marshal(protected)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return joseBase64UrlEncode(protectedBytes), nil
|
||||
}
|
||||
|
||||
func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
|
||||
buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
|
||||
copy(buf, protectedHeader)
|
||||
buf[len(protectedHeader)] = '.'
|
||||
copy(buf[len(protectedHeader)+1:], js.payload)
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Sign adds a signature using the given private key.
|
||||
func (js *JSONSignature) Sign(key PrivateKey) error {
|
||||
protected, err := js.protectedHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signBytes, err := js.signBytes(protected)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
js.signatures = append(js.signatures, jsSignature{
|
||||
Header: jsHeader{
|
||||
JWK: key.PublicKey(),
|
||||
Algorithm: algorithm,
|
||||
},
|
||||
Signature: joseBase64UrlEncode(sigBytes),
|
||||
Protected: protected,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignWithChain adds a signature using the given private key
|
||||
// and setting the x509 chain. The public key of the first element
|
||||
// in the chain must be the public key corresponding with the sign key.
|
||||
func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
|
||||
// Ensure key.Chain[0] is public key for key
|
||||
//key.Chain.PublicKey
|
||||
//key.PublicKey().CryptoPublicKey()
|
||||
|
||||
// Verify chain
|
||||
protected, err := js.protectedHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signBytes, err := js.signBytes(protected)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header := jsHeader{
|
||||
Chain: make([]string, len(chain)),
|
||||
Algorithm: algorithm,
|
||||
}
|
||||
|
||||
for i, cert := range chain {
|
||||
header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
|
||||
}
|
||||
|
||||
js.signatures = append(js.signatures, jsSignature{
|
||||
Header: header,
|
||||
Signature: joseBase64UrlEncode(sigBytes),
|
||||
Protected: protected,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify verifies all the signatures and returns the list of
|
||||
// public keys used to sign. Any x509 chains are not checked.
|
||||
func (js *JSONSignature) Verify() ([]PublicKey, error) {
|
||||
keys := make([]PublicKey, len(js.signatures))
|
||||
for i, signature := range js.signatures {
|
||||
signBytes, err := js.signBytes(signature.Protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var publicKey PublicKey
|
||||
if len(signature.Header.Chain) > 0 {
|
||||
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if signature.Header.JWK != nil {
|
||||
publicKey = signature.Header.JWK
|
||||
} else {
|
||||
return nil, errors.New("missing public key")
|
||||
}
|
||||
|
||||
sigBytes, err := joseBase64UrlDecode(signature.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keys[i] = publicKey
|
||||
}
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// VerifyChains verifies all the signatures and the chains associated
|
||||
// with each signature and returns the list of verified chains.
|
||||
// Signatures without an x509 chain are not checked.
|
||||
func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
|
||||
chains := make([][]*x509.Certificate, 0, len(js.signatures))
|
||||
for _, signature := range js.signatures {
|
||||
signBytes, err := js.signBytes(signature.Protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var publicKey PublicKey
|
||||
if len(signature.Header.Chain) > 0 {
|
||||
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err := x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
intermediates := x509.NewCertPool()
|
||||
if len(signature.Header.Chain) > 1 {
|
||||
intermediateChain := signature.Header.Chain[1:]
|
||||
for i := range intermediateChain {
|
||||
certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
intermediate, err := x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
intermediates.AddCert(intermediate)
|
||||
}
|
||||
}
|
||||
|
||||
verifyOptions := x509.VerifyOptions{
|
||||
Intermediates: intermediates,
|
||||
Roots: ca,
|
||||
}
|
||||
|
||||
verifiedChains, err := cert.Verify(verifyOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chains = append(chains, verifiedChains...)
|
||||
|
||||
sigBytes, err := joseBase64UrlDecode(signature.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return chains, nil
|
||||
}
|
||||
|
||||
// JWS returns JSON serialized JWS according to
|
||||
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
|
||||
func (js *JSONSignature) JWS() ([]byte, error) {
|
||||
if len(js.signatures) == 0 {
|
||||
return nil, errors.New("missing signature")
|
||||
}
|
||||
|
||||
sort.Sort(jsSignaturesSorted(js.signatures))
|
||||
|
||||
jsonMap := map[string]interface{}{
|
||||
"payload": js.payload,
|
||||
"signatures": js.signatures,
|
||||
}
|
||||
|
||||
return json.MarshalIndent(jsonMap, "", " ")
|
||||
}
|
||||
|
||||
func notSpace(r rune) bool {
|
||||
return !unicode.IsSpace(r)
|
||||
}
|
||||
|
||||
func detectJSONIndent(jsonContent []byte) (indent string) {
|
||||
if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
|
||||
quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
|
||||
if quoteIndex > 0 {
|
||||
indent = string(jsonContent[2 : quoteIndex+1])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type jsParsedHeader struct {
|
||||
JWK json.RawMessage `json:"jwk"`
|
||||
Algorithm string `json:"alg"`
|
||||
Chain []string `json:"x5c"`
|
||||
}
|
||||
|
||||
type jsParsedSignature struct {
|
||||
Header jsParsedHeader `json:"header"`
|
||||
Signature string `json:"signature"`
|
||||
Protected string `json:"protected"`
|
||||
}
|
||||
|
||||
// ParseJWS parses a JWS serialized JSON object into a JSON Signature.
|
||||
func ParseJWS(content []byte) (*JSONSignature, error) {
|
||||
type jsParsed struct {
|
||||
Payload string `json:"payload"`
|
||||
Signatures []jsParsedSignature `json:"signatures"`
|
||||
}
|
||||
parsed := &jsParsed{}
|
||||
err := json.Unmarshal(content, parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(parsed.Signatures) == 0 {
|
||||
return nil, errors.New("missing signatures")
|
||||
}
|
||||
payload, err := joseBase64UrlDecode(parsed.Payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
js, err := NewJSONSignature(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
js.signatures = make([]jsSignature, len(parsed.Signatures))
|
||||
for i, signature := range parsed.Signatures {
|
||||
header := jsHeader{
|
||||
Algorithm: signature.Header.Algorithm,
|
||||
}
|
||||
if signature.Header.Chain != nil {
|
||||
header.Chain = signature.Header.Chain
|
||||
}
|
||||
if signature.Header.JWK != nil {
|
||||
publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header.JWK = publicKey
|
||||
}
|
||||
js.signatures[i] = jsSignature{
|
||||
Header: header,
|
||||
Signature: signature.Signature,
|
||||
Protected: signature.Protected,
|
||||
}
|
||||
}
|
||||
|
||||
return js, nil
|
||||
}
|
||||
|
||||
// NewJSONSignature returns a new unsigned JWS from a json byte array.
|
||||
// JSONSignature will need to be signed before serializing or storing.
|
||||
// Optionally, one or more signatures can be provided as byte buffers,
|
||||
// containing serialized JWS signatures, to assemble a fully signed JWS
|
||||
// package. It is the caller's responsibility to ensure uniqueness of the
|
||||
// provided signatures.
|
||||
func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
|
||||
var dataMap map[string]interface{}
|
||||
err := json.Unmarshal(content, &dataMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
js := newJSONSignature()
|
||||
js.indent = detectJSONIndent(content)
|
||||
|
||||
js.payload = joseBase64UrlEncode(content)
|
||||
|
||||
// Find trailing } and whitespace, put in protected header
|
||||
closeIndex := bytes.LastIndexFunc(content, notSpace)
|
||||
if content[closeIndex] != '}' {
|
||||
return nil, ErrInvalidJSONContent
|
||||
}
|
||||
lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
|
||||
if content[lastRuneIndex] == ',' {
|
||||
return nil, ErrInvalidJSONContent
|
||||
}
|
||||
js.formatLength = lastRuneIndex + 1
|
||||
js.formatTail = content[js.formatLength:]
|
||||
|
||||
if len(signatures) > 0 {
|
||||
for _, signature := range signatures {
|
||||
var parsedJSig jsParsedSignature
|
||||
|
||||
if err := json.Unmarshal(signature, &parsedJSig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(stevvooe): A lot of the code below is repeated in
|
||||
// ParseJWS. It will require more refactoring to fix that.
|
||||
jsig := jsSignature{
|
||||
Header: jsHeader{
|
||||
Algorithm: parsedJSig.Header.Algorithm,
|
||||
},
|
||||
Signature: parsedJSig.Signature,
|
||||
Protected: parsedJSig.Protected,
|
||||
}
|
||||
|
||||
if parsedJSig.Header.Chain != nil {
|
||||
jsig.Header.Chain = parsedJSig.Header.Chain
|
||||
}
|
||||
|
||||
if parsedJSig.Header.JWK != nil {
|
||||
publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jsig.Header.JWK = publicKey
|
||||
}
|
||||
|
||||
js.signatures = append(js.signatures, jsig)
|
||||
}
|
||||
}
|
||||
|
||||
return js, nil
|
||||
}
|
||||
|
||||
// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
|
||||
// struct. JWS will need to be signed before serializing or storing.
|
||||
func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
|
||||
switch content.(type) {
|
||||
case map[string]interface{}:
|
||||
case struct{}:
|
||||
default:
|
||||
return nil, errors.New("invalid data type")
|
||||
}
|
||||
|
||||
js := newJSONSignature()
|
||||
js.indent = " "
|
||||
|
||||
payload, err := json.MarshalIndent(content, "", js.indent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
js.payload = joseBase64UrlEncode(payload)
|
||||
|
||||
// Remove '\n}' from formatted section, put in protected header
|
||||
js.formatLength = len(payload) - 2
|
||||
js.formatTail = payload[js.formatLength:]
|
||||
|
||||
return js, nil
|
||||
}
|
||||
|
||||
func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
|
||||
value, ok := m[key]
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
switch v := value.(type) {
|
||||
case int:
|
||||
return v, true
|
||||
case float64:
|
||||
return int(v), true
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
|
||||
value, ok := m[key]
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
v, ok = value.(string)
|
||||
return
|
||||
}
|
||||
|
||||
// ParsePrettySignature parses a formatted signature into a
|
||||
// JSON signature. If the signatures are missing the format information
|
||||
// an error is returned. The formatted signature must be created by
|
||||
// the same method as format signature.
|
||||
func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
|
||||
var contentMap map[string]json.RawMessage
|
||||
err := json.Unmarshal(content, &contentMap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling content: %s", err)
|
||||
}
|
||||
sigMessage, ok := contentMap[signatureKey]
|
||||
if !ok {
|
||||
return nil, ErrMissingSignatureKey
|
||||
}
|
||||
|
||||
var signatureBlocks []jsParsedSignature
|
||||
err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
|
||||
}
|
||||
|
||||
js := newJSONSignature()
|
||||
js.signatures = make([]jsSignature, len(signatureBlocks))
|
||||
|
||||
for i, signatureBlock := range signatureBlocks {
|
||||
protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("base64 decode error: %s", err)
|
||||
}
|
||||
var protectedHeader map[string]interface{}
|
||||
err = json.Unmarshal(protectedBytes, &protectedHeader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
|
||||
}
|
||||
|
||||
formatLength, ok := readIntFromMap("formatLength", protectedHeader)
|
||||
if !ok {
|
||||
return nil, errors.New("missing formatted length")
|
||||
}
|
||||
encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
|
||||
if !ok {
|
||||
return nil, errors.New("missing formatted tail")
|
||||
}
|
||||
formatTail, err := joseBase64UrlDecode(encodedTail)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("base64 decode error on tail: %s", err)
|
||||
}
|
||||
if js.formatLength == 0 {
|
||||
js.formatLength = formatLength
|
||||
} else if js.formatLength != formatLength {
|
||||
return nil, errors.New("conflicting format length")
|
||||
}
|
||||
if len(js.formatTail) == 0 {
|
||||
js.formatTail = formatTail
|
||||
} else if bytes.Compare(js.formatTail, formatTail) != 0 {
|
||||
return nil, errors.New("conflicting format tail")
|
||||
}
|
||||
|
||||
header := jsHeader{
|
||||
Algorithm: signatureBlock.Header.Algorithm,
|
||||
Chain: signatureBlock.Header.Chain,
|
||||
}
|
||||
if signatureBlock.Header.JWK != nil {
|
||||
publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling public key: %s", err)
|
||||
}
|
||||
header.JWK = publicKey
|
||||
}
|
||||
js.signatures[i] = jsSignature{
|
||||
Header: header,
|
||||
Signature: signatureBlock.Signature,
|
||||
Protected: signatureBlock.Protected,
|
||||
}
|
||||
}
|
||||
if js.formatLength > len(content) {
|
||||
return nil, errors.New("invalid format length")
|
||||
}
|
||||
formatted := make([]byte, js.formatLength+len(js.formatTail))
|
||||
copy(formatted, content[:js.formatLength])
|
||||
copy(formatted[js.formatLength:], js.formatTail)
|
||||
js.indent = detectJSONIndent(formatted)
|
||||
js.payload = joseBase64UrlEncode(formatted)
|
||||
|
||||
return js, nil
|
||||
}
|
||||
|
||||
// PrettySignature formats a json signature into an easy to read
|
||||
// single json serialized object.
|
||||
func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
|
||||
if len(js.signatures) == 0 {
|
||||
return nil, errors.New("no signatures")
|
||||
}
|
||||
payload, err := joseBase64UrlDecode(js.payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload = payload[:js.formatLength]
|
||||
|
||||
sort.Sort(jsSignaturesSorted(js.signatures))
|
||||
|
||||
var marshalled []byte
|
||||
var marshallErr error
|
||||
if js.indent != "" {
|
||||
marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
|
||||
} else {
|
||||
marshalled, marshallErr = json.Marshal(js.signatures)
|
||||
}
|
||||
if marshallErr != nil {
|
||||
return nil, marshallErr
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
|
||||
buf.Write(payload)
|
||||
buf.WriteByte(',')
|
||||
if js.indent != "" {
|
||||
buf.WriteByte('\n')
|
||||
buf.WriteString(js.indent)
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(signatureKey)
|
||||
buf.WriteString("\": ")
|
||||
buf.Write(marshalled)
|
||||
buf.WriteByte('\n')
|
||||
} else {
|
||||
buf.WriteByte('"')
|
||||
buf.WriteString(signatureKey)
|
||||
buf.WriteString("\":")
|
||||
buf.Write(marshalled)
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Signatures provides the signatures on this JWS as opaque blobs, sorted by
|
||||
// keyID. These blobs can be stored and reassembled with payloads. Internally,
|
||||
// they are simply marshaled json web signatures but implementations should
|
||||
// not rely on this.
|
||||
func (js *JSONSignature) Signatures() ([][]byte, error) {
|
||||
sort.Sort(jsSignaturesSorted(js.signatures))
|
||||
|
||||
var sb [][]byte
|
||||
for _, jsig := range js.signatures {
|
||||
p, err := json.Marshal(jsig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sb = append(sb, p)
|
||||
}
|
||||
|
||||
return sb, nil
|
||||
}
|
||||
|
||||
// Merge combines the signatures from one or more other signatures into the
|
||||
// method receiver. If the payloads differ for any argument, an error will be
|
||||
// returned and the receiver will not be modified.
|
||||
func (js *JSONSignature) Merge(others ...*JSONSignature) error {
|
||||
merged := js.signatures
|
||||
for _, other := range others {
|
||||
if js.payload != other.payload {
|
||||
return fmt.Errorf("payloads differ from merge target")
|
||||
}
|
||||
merged = append(merged, other.signatures...)
|
||||
}
|
||||
|
||||
js.signatures = merged
|
||||
return nil
|
||||
}
|
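A compact sketch of the JSONSignature API above (illustrative, not vendored code): wrap a JSON document, sign it, verify it, and emit the embedded "pretty" form. The top-level key name "signatures" is the caller's choice, and the document contents are placeholders.

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	content := []byte("{\n   \"name\": \"library/busybox\",\n   \"tag\": \"latest\"\n}")

	// Wrap the JSON document, sign it, then verify the signatures.
	js, err := libtrust.NewJSONSignature(content)
	if err != nil {
		panic(err)
	}
	if err := js.Sign(key); err != nil {
		panic(err)
	}
	keys, err := js.Verify()
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified %d signature(s), first key %s\n", len(keys), keys[0].KeyID())

	// "Pretty" form: the original document with the signatures embedded
	// under the chosen top-level key.
	pretty, err := js.PrettySignature("signatures")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pretty))
}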
253 vendor/github.com/docker/libtrust/key.go (generated, vendored)
@@ -1,253 +0,0 @@
|
||||
package libtrust
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// PublicKey is a generic interface for a Public Key.
|
||||
type PublicKey interface {
|
||||
// KeyType returns the key type for this key. For elliptic curve keys,
|
||||
// this value should be "EC". For RSA keys, this value should be "RSA".
|
||||
KeyType() string
|
||||
// KeyID returns a distinct identifier which is unique to this Public Key.
|
||||
// The format generated by this library is a base32 encoding of a 240 bit
|
||||
// hash of the public key data divided into 12 groups like so:
|
||||
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
|
||||
KeyID() string
|
||||
// Verify verifies the signature of the data in the io.Reader using this
|
||||
// Public Key. The alg parameter should identify the digital signature
|
||||
// algorithm which was used to produce the signature and should be
|
||||
// supported by this public key. Returns a nil error if the signature
|
||||
// is valid.
|
||||
Verify(data io.Reader, alg string, signature []byte) error
|
||||
// CryptoPublicKey returns the internal object which can be used as a
|
||||
// crypto.PublicKey for use with other standard library operations. The type
|
||||
// is either *rsa.PublicKey or *ecdsa.PublicKey
|
||||
CryptoPublicKey() crypto.PublicKey
|
||||
// These public keys can be serialized to the standard JSON encoding for
|
||||
// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
|
||||
// Algorithms.
|
||||
MarshalJSON() ([]byte, error)
|
||||
// These keys can also be serialized to the standard PEM encoding.
|
||||
PEMBlock() (*pem.Block, error)
|
||||
// The string representation of a key is its key type and ID.
|
||||
String() string
|
||||
AddExtendedField(string, interface{})
|
||||
GetExtendedField(string) interface{}
|
||||
}
|
||||
|
||||
// PrivateKey is a generic interface for a Private Key.
|
||||
type PrivateKey interface {
|
||||
// A PrivateKey contains all fields and methods of a PublicKey of the
|
||||
// same type. The MarshalJSON method also outputs the private key as a
|
||||
// JSON Web Key, and the PEMBlock method outputs the private key as a
|
||||
// PEM block.
|
||||
PublicKey
|
||||
// PublicKey returns the PublicKey associated with this PrivateKey.
|
||||
PublicKey() PublicKey
|
||||
// Sign signs the data read from the io.Reader using a signature algorithm
|
||||
// supported by the private key. If the specified hashing algorithm is
|
||||
// supported by this key, that hash function is used to generate the
|
||||
// signature otherwise the default hashing algorithm for this key is
|
||||
// used. Returns the signature and identifier of the algorithm used.
|
||||
Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
|
||||
// CryptoPrivateKey returns the internal object which can be used as a
|
||||
// crypto.PrivateKey for use with other standard library operations. The
// type is either *rsa.PrivateKey or *ecdsa.PrivateKey.
|
||||
CryptoPrivateKey() crypto.PrivateKey
|
||||
}
|
||||
|
||||
// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
|
||||
// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
|
||||
// key is of an unsupported type.
|
||||
func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
|
||||
switch cryptoPublicKey := cryptoPublicKey.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
return fromECPublicKey(cryptoPublicKey)
|
||||
case *rsa.PublicKey:
|
||||
return fromRSAPublicKey(cryptoPublicKey), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
|
||||
}
|
||||
}
|
||||
|
||||
// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
|
||||
// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
|
||||
// key is of an unsupported type.
|
||||
func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
|
||||
switch cryptoPrivateKey := cryptoPrivateKey.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
return fromECPrivateKey(cryptoPrivateKey)
|
||||
case *rsa.PrivateKey:
|
||||
return fromRSAPrivateKey(cryptoPrivateKey), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
|
||||
// PublicKey or an error if there is a problem with the encoding.
|
||||
func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
|
||||
pemBlock, _ := pem.Decode(data)
|
||||
if pemBlock == nil {
|
||||
return nil, errors.New("unable to find PEM encoded data")
|
||||
} else if pemBlock.Type != "PUBLIC KEY" {
|
||||
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
|
||||
}
|
||||
|
||||
return pubKeyFromPEMBlock(pemBlock)
|
||||
}
|
||||
|
||||
// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
|
||||
// PEM blocks appended one after the other and returns a slice of PublicKey
|
||||
// objects that it finds.
|
||||
func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
|
||||
pubKeys := []PublicKey{}
|
||||
|
||||
for {
|
||||
var pemBlock *pem.Block
|
||||
pemBlock, data = pem.Decode(data)
|
||||
if pemBlock == nil {
|
||||
break
|
||||
} else if pemBlock.Type != "PUBLIC KEY" {
|
||||
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
|
||||
}
|
||||
|
||||
pubKey, err := pubKeyFromPEMBlock(pemBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubKeys = append(pubKeys, pubKey)
|
||||
}
|
||||
|
||||
return pubKeys, nil
|
||||
}
|
||||
|
||||
// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
|
||||
// PrivateKey or an error if there is a problem with the encoding.
|
||||
func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
|
||||
pemBlock, _ := pem.Decode(data)
|
||||
if pemBlock == nil {
|
||||
return nil, errors.New("unable to find PEM encoded data")
|
||||
}
|
||||
|
||||
var key PrivateKey
|
||||
|
||||
switch {
|
||||
case pemBlock.Type == "RSA PRIVATE KEY":
|
||||
rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
|
||||
}
|
||||
key = fromRSAPrivateKey(rsaPrivateKey)
|
||||
case pemBlock.Type == "EC PRIVATE KEY":
|
||||
ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
|
||||
}
|
||||
key, err = fromECPrivateKey(ecPrivateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
|
||||
}
|
||||
|
||||
addPEMHeadersToKey(pemBlock, key.PublicKey())
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
|
||||
// Public Key to be used with libtrust.
|
||||
func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
|
||||
jwk := make(map[string]interface{})
|
||||
|
||||
err := json.Unmarshal(data, &jwk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"decoding JWK Public Key JSON data: %s\n", err,
|
||||
)
|
||||
}
|
||||
|
||||
// Get the Key Type value.
|
||||
kty, err := stringFromMap(jwk, "kty")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK Public Key type: %s", err)
|
||||
}
|
||||
|
||||
switch {
|
||||
case kty == "EC":
|
||||
// Call out to unmarshal EC public key.
|
||||
return ecPublicKeyFromMap(jwk)
|
||||
case kty == "RSA":
|
||||
// Call out to unmarshal RSA public key.
|
||||
return rsaPublicKeyFromMap(jwk)
|
||||
default:
|
||||
return nil, fmt.Errorf(
|
||||
"JWK Public Key type not supported: %q\n", kty,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
|
||||
// and returns a slice of Public Key objects.
|
||||
func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
|
||||
rawKeys, err := loadJSONKeySetRaw(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubKeys := make([]PublicKey, 0, len(rawKeys))
|
||||
|
||||
for _, rawKey := range rawKeys {
|
||||
pubKey, err := UnmarshalPublicKeyJWK(rawKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pubKeys = append(pubKeys, pubKey)
|
||||
}
|
||||
|
||||
return pubKeys, nil
|
||||
}
|
||||
|
||||
// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
|
||||
// Private Key to be used with libtrust.
|
||||
func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
|
||||
jwk := make(map[string]interface{})
|
||||
|
||||
err := json.Unmarshal(data, &jwk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"decoding JWK Private Key JSON data: %s\n", err,
|
||||
)
|
||||
}
|
||||
|
||||
// Get the Key Type value.
|
||||
kty, err := stringFromMap(jwk, "kty")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("JWK Private Key type: %s", err)
|
||||
}
|
||||
|
||||
switch {
|
||||
case kty == "EC":
|
||||
// Call out to unmarshal EC private key.
|
||||
return ecPrivateKeyFromMap(jwk)
|
||||
case kty == "RSA":
|
||||
// Call out to unmarshal RSA private key.
|
||||
return rsaPrivateKeyFromMap(jwk)
|
||||
default:
|
||||
return nil, fmt.Errorf(
|
||||
"JWK Private Key type not supported: %q\n", kty,
|
||||
)
|
||||
}
|
||||
}
|
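key.go is the funnel the rest of the package goes through: PEM or JWK bytes in, a generic PublicKey/PrivateKey out. A short round-trip sketch under the same import assumption (all values are illustrative):

package main

import (
	"encoding/pem"
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	// PEM round trip for the private key.
	block, err := key.PEMBlock()
	if err != nil {
		panic(err)
	}
	restored, err := libtrust.UnmarshalPrivateKeyPEM(pem.EncodeToMemory(block))
	if err != nil {
		panic(err)
	}

	// JWK round trip for the public half.
	jwkJSON, err := key.PublicKey().MarshalJSON()
	if err != nil {
		panic(err)
	}
	pub, err := libtrust.UnmarshalPublicKeyJWK(jwkJSON)
	if err != nil {
		panic(err)
	}

	// Both paths recover the same key identity.
	fmt.Println(restored.KeyID() == pub.KeyID()) // true
}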
255 vendor/github.com/docker/libtrust/key_files.go (generated, vendored)
@@ -1,255 +0,0 @@
package libtrust

import (
    "encoding/json"
    "encoding/pem"
    "errors"
    "fmt"
    "io/ioutil"
    "os"
    "strings"
)

var (
    // ErrKeyFileDoesNotExist indicates that the private key file does not exist.
    ErrKeyFileDoesNotExist = errors.New("key file does not exist")
)

func readKeyFileBytes(filename string) ([]byte, error) {
    data, err := ioutil.ReadFile(filename)
    if err != nil {
        if os.IsNotExist(err) {
            err = ErrKeyFileDoesNotExist
        } else {
            err = fmt.Errorf("unable to read key file %s: %s", filename, err)
        }

        return nil, err
    }

    return data, nil
}

/*
    Loading and Saving of Public and Private Keys in either PEM or JWK format.
*/

// LoadKeyFile opens the given filename and attempts to read a Private Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadKeyFile(filename string) (PrivateKey, error) {
    contents, err := readKeyFileBytes(filename)
    if err != nil {
        return nil, err
    }

    var key PrivateKey

    if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
        key, err = UnmarshalPrivateKeyJWK(contents)
        if err != nil {
            return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
        }
    } else {
        key, err = UnmarshalPrivateKeyPEM(contents)
        if err != nil {
            return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
        }
    }

    return key, nil
}

// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadPublicKeyFile(filename string) (PublicKey, error) {
    contents, err := readKeyFileBytes(filename)
    if err != nil {
        return nil, err
    }

    var key PublicKey

    if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
        key, err = UnmarshalPublicKeyJWK(contents)
        if err != nil {
            return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
        }
    } else {
        key, err = UnmarshalPublicKeyPEM(contents)
        if err != nil {
            return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
        }
    }

    return key, nil
}

// SaveKey saves the given key to a file using the provided filename.
// This process will overwrite any existing file at the provided location.
func SaveKey(filename string, key PrivateKey) error {
    var encodedKey []byte
    var err error

    if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
        // Encode in JSON Web Key format.
        encodedKey, err = json.MarshalIndent(key, "", " ")
        if err != nil {
            return fmt.Errorf("unable to encode private key JWK: %s", err)
        }
    } else {
        // Encode in PEM format.
        pemBlock, err := key.PEMBlock()
        if err != nil {
            return fmt.Errorf("unable to encode private key PEM: %s", err)
        }
        encodedKey = pem.EncodeToMemory(pemBlock)
    }

    err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
    if err != nil {
        return fmt.Errorf("unable to write private key file %s: %s", filename, err)
    }

    return nil
}

// SavePublicKey saves the given public key to the file.
func SavePublicKey(filename string, key PublicKey) error {
    var encodedKey []byte
    var err error

    if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
        // Encode in JSON Web Key format.
        encodedKey, err = json.MarshalIndent(key, "", " ")
        if err != nil {
            return fmt.Errorf("unable to encode public key JWK: %s", err)
        }
    } else {
        // Encode in PEM format.
        pemBlock, err := key.PEMBlock()
        if err != nil {
            return fmt.Errorf("unable to encode public key PEM: %s", err)
        }
        encodedKey = pem.EncodeToMemory(pemBlock)
    }

    err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
    if err != nil {
        return fmt.Errorf("unable to write public key file %s: %s", filename, err)
    }

    return nil
}

// Public Key Set files

type jwkSet struct {
    Keys []json.RawMessage `json:"keys"`
}

// LoadKeySetFile loads a key set
func LoadKeySetFile(filename string) ([]PublicKey, error) {
    if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
        return loadJSONKeySetFile(filename)
    }

    // Must be a PEM format file
    return loadPEMKeySetFile(filename)
}

func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
    if len(data) == 0 {
        // This is okay, just return an empty slice.
        return []json.RawMessage{}, nil
    }

    keySet := jwkSet{}

    err := json.Unmarshal(data, &keySet)
    if err != nil {
        return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
    }

    return keySet.Keys, nil
}

func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
    contents, err := readKeyFileBytes(filename)
    if err != nil && err != ErrKeyFileDoesNotExist {
        return nil, err
    }

    return UnmarshalPublicKeyJWKSet(contents)
}

func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
    data, err := readKeyFileBytes(filename)
    if err != nil && err != ErrKeyFileDoesNotExist {
        return nil, err
    }

    return UnmarshalPublicKeyPEMBundle(data)
}

// AddKeySetFile adds a key to a key set
func AddKeySetFile(filename string, key PublicKey) error {
    if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
        return addKeySetJSONFile(filename, key)
    }

    // Must be a PEM format file
    return addKeySetPEMFile(filename, key)
}

func addKeySetJSONFile(filename string, key PublicKey) error {
    encodedKey, err := json.Marshal(key)
    if err != nil {
        return fmt.Errorf("unable to encode trusted client key: %s", err)
    }

    contents, err := readKeyFileBytes(filename)
    if err != nil && err != ErrKeyFileDoesNotExist {
        return err
    }

    rawEntries, err := loadJSONKeySetRaw(contents)
    if err != nil {
        return err
    }

    rawEntries = append(rawEntries, json.RawMessage(encodedKey))
    entriesWrapper := jwkSet{Keys: rawEntries}

    encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
    if err != nil {
        return fmt.Errorf("unable to encode trusted client keys: %s", err)
    }

    err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
    if err != nil {
        return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
    }

    return nil
}

func addKeySetPEMFile(filename string, key PublicKey) error {
    // Encode to PEM, open file for appending, write PEM.
    file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
    if err != nil {
        return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
    }
    defer file.Close()

    pemBlock, err := key.PEMBlock()
    if err != nil {
        return fmt.Errorf("unable to encode trusted key: %s", err)
    }

    _, err = file.Write(pem.EncodeToMemory(pemBlock))
    if err != nil {
        return fmt.Errorf("unable to write trusted keys file: %s", err)
    }

    return nil
}
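For orientation (not part of the diff): a minimal, hypothetical round-trip through the helpers above — SaveKey and LoadKeyFile — using GenerateRSA2048PrivateKey from rsa_key.go further down; the file name trust-key.json is an arbitrary example, and the ".json" suffix is what selects the JWK encoding path.

package main

import (
    "fmt"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    // Generate a fresh key (any libtrust PrivateKey would do).
    key, err := libtrust.GenerateRSA2048PrivateKey()
    if err != nil {
        log.Fatal(err)
    }
    // The ".json" extension makes SaveKey/LoadKeyFile use the JWK format.
    if err := libtrust.SaveKey("trust-key.json", key); err != nil {
        log.Fatal(err)
    }
    loaded, err := libtrust.LoadKeyFile("trust-key.json")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("key ID:", loaded.KeyID())
}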
175
vendor/github.com/docker/libtrust/key_manager.go
generated
vendored
175
vendor/github.com/docker/libtrust/key_manager.go
generated
vendored
@@ -1,175 +0,0 @@
package libtrust

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
    "net"
    "os"
    "path"
    "sync"
)

// ClientKeyManager manages client keys on the filesystem
type ClientKeyManager struct {
    key        PrivateKey
    clientFile string
    clientDir  string

    clientLock sync.RWMutex
    clients    []PublicKey

    configLock sync.Mutex
    configs    []*tls.Config
}

// NewClientKeyManager loads a new manager from a set of key files
// and managed by the given private key.
func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
    m := &ClientKeyManager{
        key:        trustKey,
        clientFile: clientFile,
        clientDir:  clientDir,
    }
    if err := m.loadKeys(); err != nil {
        return nil, err
    }
    // TODO Start watching file and directory

    return m, nil
}

func (c *ClientKeyManager) loadKeys() (err error) {
    // Load authorized keys file
    var clients []PublicKey
    if c.clientFile != "" {
        clients, err = LoadKeySetFile(c.clientFile)
        if err != nil {
            return fmt.Errorf("unable to load authorized keys: %s", err)
        }
    }

    // Add clients from authorized keys directory
    files, err := ioutil.ReadDir(c.clientDir)
    if err != nil && !os.IsNotExist(err) {
        return fmt.Errorf("unable to open authorized keys directory: %s", err)
    }
    for _, f := range files {
        if !f.IsDir() {
            publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
            if err != nil {
                return fmt.Errorf("unable to load authorized key file: %s", err)
            }
            clients = append(clients, publicKey)
        }
    }

    c.clientLock.Lock()
    c.clients = clients
    c.clientLock.Unlock()

    return nil
}

// RegisterTLSConfig registers a tls configuration to manager
// such that any changes to the keys may be reflected in
// the tls client CA pool
func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
    c.clientLock.RLock()
    certPool, err := GenerateCACertPool(c.key, c.clients)
    if err != nil {
        return fmt.Errorf("CA pool generation error: %s", err)
    }
    c.clientLock.RUnlock()

    tlsConfig.ClientCAs = certPool

    c.configLock.Lock()
    c.configs = append(c.configs, tlsConfig)
    c.configLock.Unlock()

    return nil
}

// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
// libtrust identity authentication for the domain specified
func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
    tlsConfig := newTLSConfig()

    tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
    if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
        return nil, err
    }

    // Generate cert
    ips, domains, err := parseAddr(addr)
    if err != nil {
        return nil, err
    }
    // add domain that it expects clients to use
    domains = append(domains, domain)
    x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
    if err != nil {
        return nil, fmt.Errorf("certificate generation error: %s", err)
    }
    tlsConfig.Certificates = []tls.Certificate{{
        Certificate: [][]byte{x509Cert.Raw},
        PrivateKey:  trustKey.CryptoPrivateKey(),
        Leaf:        x509Cert,
    }}

    return tlsConfig, nil
}

// NewCertAuthTLSConfig creates a tls.Config for the server to use for
// certificate authentication
func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
    tlsConfig := newTLSConfig()

    cert, err := tls.LoadX509KeyPair(certPath, keyPath)
    if err != nil {
        return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
    }
    tlsConfig.Certificates = []tls.Certificate{cert}

    // Verify client certificates against a CA?
    if caPath != "" {
        certPool := x509.NewCertPool()
        file, err := ioutil.ReadFile(caPath)
        if err != nil {
            return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
        }
        certPool.AppendCertsFromPEM(file)

        tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
        tlsConfig.ClientCAs = certPool
    }

    return tlsConfig, nil
}

func newTLSConfig() *tls.Config {
    return &tls.Config{
        NextProtos: []string{"http/1.1"},
        // Avoid fallback on insecure SSL protocols
        MinVersion: tls.VersionTLS10,
    }
}

// parseAddr parses an address into an array of IPs and domains
func parseAddr(addr string) ([]net.IP, []string, error) {
    host, _, err := net.SplitHostPort(addr)
    if err != nil {
        return nil, nil, err
    }
    var domains []string
    var ips []net.IP
    ip := net.ParseIP(host)
    if ip != nil {
        ips = []net.IP{ip}
    } else {
        domains = []string{host}
    }
    return ips, domains, nil
}
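For orientation (not part of the diff): a hedged sketch of serving TLS with NewCertAuthTLSConfig from the file above; the certificate paths and the listen address are placeholders.

package main

import (
    "crypto/tls"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    // ca.pem, server-cert.pem and server-key.pem are example paths only.
    tlsConfig, err := libtrust.NewCertAuthTLSConfig("ca.pem", "server-cert.pem", "server-key.pem")
    if err != nil {
        log.Fatal(err)
    }
    // Any client connecting here must present a certificate signed by the CA.
    ln, err := tls.Listen("tcp", ":8443", tlsConfig)
    if err != nil {
        log.Fatal(err)
    }
    defer ln.Close()
    log.Println("listening with client-certificate verification enabled")
}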
427
vendor/github.com/docker/libtrust/rsa_key.go
generated
vendored
427
vendor/github.com/docker/libtrust/rsa_key.go
generated
vendored
@@ -1,427 +0,0 @@
package libtrust

import (
    "crypto"
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "encoding/json"
    "encoding/pem"
    "errors"
    "fmt"
    "io"
    "math/big"
)

/*
 * RSA DSA PUBLIC KEY
 */

// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
type rsaPublicKey struct {
    *rsa.PublicKey
    extended map[string]interface{}
}

func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
    return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
}

// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
func (k *rsaPublicKey) KeyType() string {
    return "RSA"
}

// KeyID returns a distinct identifier which is unique to this Public Key.
func (k *rsaPublicKey) KeyID() string {
    return keyIDFromCryptoKey(k)
}

func (k *rsaPublicKey) String() string {
    return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
}

// Verify verifies the signature of the data in the io.Reader using this Public Key.
// The alg parameter should be the name of the JWA digital signature algorithm
// which was used to produce the signature and should be supported by this
// public key. Returns a nil error if the signature is valid.
func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
    // Verify the signature of the given data, return a non-nil error if invalid.
    sigAlg, err := rsaSignatureAlgorithmByName(alg)
    if err != nil {
        return fmt.Errorf("unable to verify Signature: %s", err)
    }

    hasher := sigAlg.HashID().New()
    _, err = io.Copy(hasher, data)
    if err != nil {
        return fmt.Errorf("error reading data to sign: %s", err)
    }
    hash := hasher.Sum(nil)

    err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
    if err != nil {
        return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
    }

    return nil
}

// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
    return k.PublicKey
}

func (k *rsaPublicKey) toMap() map[string]interface{} {
    jwk := make(map[string]interface{})
    for k, v := range k.extended {
        jwk[k] = v
    }
    jwk["kty"] = k.KeyType()
    jwk["kid"] = k.KeyID()
    jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
    jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))

    return jwk
}

// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
    return json.Marshal(k.toMap())
}

// PEMBlock serializes this Public Key to DER-encoded PKIX format.
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
    derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
    if err != nil {
        return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
    }
    k.extended["kid"] = k.KeyID() // For display purposes.
    return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}

func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
    k.extended[field] = value
}

func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
    v, ok := k.extended[field]
    if !ok {
        return nil
    }
    return v
}

func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
    // JWK key type (kty) has already been determined to be "RSA".
    // Need to extract 'n', 'e', and 'kid' and check for
    // consistency.

    // Get the modulus parameter N.
    nB64Url, err := stringFromMap(jwk, "n")
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
    }

    n, err := parseRSAModulusParam(nB64Url)
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
    }

    // Get the public exponent E.
    eB64Url, err := stringFromMap(jwk, "e")
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
    }

    e, err := parseRSAPublicExponentParam(eB64Url)
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
    }

    key := &rsaPublicKey{
        PublicKey: &rsa.PublicKey{N: n, E: e},
    }

    // Key ID is optional, but if it exists, it should match the key.
    _, ok := jwk["kid"]
    if ok {
        kid, err := stringFromMap(jwk, "kid")
        if err != nil {
            return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
        }
        if kid != key.KeyID() {
            return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
        }
    }

    if _, ok := jwk["d"]; ok {
        return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
    }

    key.extended = jwk

    return key, nil
}

/*
 * RSA DSA PRIVATE KEY
 */

// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
type rsaPrivateKey struct {
    rsaPublicKey
    *rsa.PrivateKey
}

func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
    return &rsaPrivateKey{
        *fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
        cryptoPrivateKey,
    }
}

// PublicKey returns the Public Key data associated with this Private Key.
func (k *rsaPrivateKey) PublicKey() PublicKey {
    return &k.rsaPublicKey
}

func (k *rsaPrivateKey) String() string {
    return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
}

// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the RSA private key. If the specified hashing algorithm is supported by
// this key, that hash function is used to generate the signature otherwise
// the default hashing algorithm for this key is used. Returns the signature
// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
// "RS512".
func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
    // Generate a signature of the data using the internal alg.
    sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
    hasher := sigAlg.HashID().New()

    _, err = io.Copy(hasher, data)
    if err != nil {
        return nil, "", fmt.Errorf("error reading data to sign: %s", err)
    }
    hash := hasher.Sum(nil)

    signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
    if err != nil {
        return nil, "", fmt.Errorf("error producing signature: %s", err)
    }

    alg = sigAlg.HeaderParam()

    return
}

// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The type
// is either *rsa.PrivateKey or *ecdsa.PrivateKey
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
    return k.PrivateKey
}

func (k *rsaPrivateKey) toMap() map[string]interface{} {
    k.Precompute() // Make sure the precomputed values are stored.
    jwk := k.rsaPublicKey.toMap()

    jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
    jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
    jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
    jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
    jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
    jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())

    otherPrimes := k.Primes[2:]

    if len(otherPrimes) > 0 {
        otherPrimesInfo := make([]interface{}, len(otherPrimes))
        for i, r := range otherPrimes {
            otherPrimeInfo := make(map[string]string, 3)
            otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
            crtVal := k.Precomputed.CRTValues[i]
            otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
            otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
            otherPrimesInfo[i] = otherPrimeInfo
        }
        jwk["oth"] = otherPrimesInfo
    }

    return jwk
}

// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
    return json.Marshal(k.toMap())
}

// PEMBlock serializes this Private Key to DER-encoded PKCS1 format.
func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
    derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
    k.extended["keyID"] = k.KeyID() // For display purposes.
    return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
}

func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
    // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
    // only the private key exponent 'd' is REQUIRED, the others are just for
    // signature/decryption optimizations and SHOULD be included when the JWK
    // is produced. We MAY choose to accept a JWK which only includes 'd', but
    // we're going to go ahead and not choose to accept it without the extra
    // fields. Only the 'oth' field will be optional (for multi-prime keys).
    privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
    }
    firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
    }
    secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
    }
    firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
    }
    secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
    }
    crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
    if err != nil {
        return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
    }

    var oth interface{}
    if _, ok := jwk["oth"]; ok {
        oth = jwk["oth"]
        delete(jwk, "oth")
    }

    // JWK key type (kty) has already been determined to be "RSA".
    // Need to extract the public key information, then extract the private
    // key values.
    publicKey, err := rsaPublicKeyFromMap(jwk)
    if err != nil {
        return nil, err
    }

    privateKey := &rsa.PrivateKey{
        PublicKey: *publicKey.PublicKey,
        D:         privateExponent,
        Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
        Precomputed: rsa.PrecomputedValues{
            Dp:   firstFactorCRT,
            Dq:   secondFactorCRT,
            Qinv: crtCoeff,
        },
    }

    if oth != nil {
        // Should be an array of more JSON objects.
        otherPrimesInfo, ok := oth.([]interface{})
        if !ok {
            return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
        }
        numOtherPrimeFactors := len(otherPrimesInfo)
        if numOtherPrimeFactors == 0 {
            return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
        }
        otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
        productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
        crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)

        for i, val := range otherPrimesInfo {
            otherPrimeinfo, ok := val.(map[string]interface{})
            if !ok {
                return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
            }

            otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
            if err != nil {
                return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
            }
            otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
            if err != nil {
                return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
            }
            otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
            if err != nil {
                return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
            }

            crtValue := crtValues[i]
            crtValue.Exp = otherFactorCRT
            crtValue.Coeff = otherCrtCoeff
            crtValue.R = productOfPrimes
            otherPrimeFactors[i] = otherPrimeFactor
            productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
        }

        privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
        privateKey.Precomputed.CRTValues = crtValues
    }

    key := &rsaPrivateKey{
        rsaPublicKey: *publicKey,
        PrivateKey:   privateKey,
    }

    return key, nil
}

/*
 * Key Generation Functions.
 */

func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
    k = new(rsaPrivateKey)
    k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
    if err != nil {
        return nil, err
    }

    k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
    k.extended = make(map[string]interface{})

    return
}

// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
func GenerateRSA2048PrivateKey() (PrivateKey, error) {
    k, err := generateRSAPrivateKey(2048)
    if err != nil {
        return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
    }

    return k, nil
}

// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
func GenerateRSA3072PrivateKey() (PrivateKey, error) {
    k, err := generateRSAPrivateKey(3072)
    if err != nil {
        return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
    }

    return k, nil
}

// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
func GenerateRSA4096PrivateKey() (PrivateKey, error) {
    k, err := generateRSAPrivateKey(4096)
    if err != nil {
        return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
    }

    return k, nil
}
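For orientation (not part of the diff): a hedged sign-and-verify sketch against the PrivateKey interface implemented by rsaPrivateKey above; it assumes the Sign, PublicKey, Verify and KeyID methods shown in this file.

package main

import (
    "bytes"
    "crypto"
    "log"

    "github.com/docker/libtrust"
)

func main() {
    key, err := libtrust.GenerateRSA2048PrivateKey()
    if err != nil {
        log.Fatal(err)
    }
    msg := []byte("hello, libtrust")
    // Sign returns the raw signature plus the JWS algorithm name, e.g. "RS256".
    sig, alg, err := key.Sign(bytes.NewReader(msg), crypto.SHA256)
    if err != nil {
        log.Fatal(err)
    }
    // Verify returns nil only if the signature matches the data and algorithm.
    if err := key.PublicKey().Verify(bytes.NewReader(msg), alg, sig); err != nil {
        log.Fatal(err)
    }
    log.Printf("verified %s signature from key %s", alg, key.KeyID())
}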
363
vendor/github.com/docker/libtrust/util.go
generated
vendored
363
vendor/github.com/docker/libtrust/util.go
generated
vendored
@@ -1,363 +0,0 @@
package libtrust

import (
    "bytes"
    "crypto"
    "crypto/elliptic"
    "crypto/tls"
    "crypto/x509"
    "encoding/base32"
    "encoding/base64"
    "encoding/binary"
    "encoding/pem"
    "errors"
    "fmt"
    "math/big"
    "net/url"
    "os"
    "path/filepath"
    "strings"
    "time"
)

// LoadOrCreateTrustKey will load a PrivateKey from the specified path
func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
    if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
        return nil, err
    }

    trustKey, err := LoadKeyFile(trustKeyPath)
    if err == ErrKeyFileDoesNotExist {
        trustKey, err = GenerateECP256PrivateKey()
        if err != nil {
            return nil, fmt.Errorf("error generating key: %s", err)
        }

        if err := SaveKey(trustKeyPath, trustKey); err != nil {
            return nil, fmt.Errorf("error saving key file: %s", err)
        }

        dir, file := filepath.Split(trustKeyPath)
        if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
            return nil, fmt.Errorf("error saving public key file: %s", err)
        }
    } else if err != nil {
        return nil, fmt.Errorf("error loading key file: %s", err)
    }
    return trustKey, nil
}

// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity
// based authentication from the specified dockerUrl, the rootConfigPath and
// the server name to which it is connecting.
// If trustUnknownHosts is true it will automatically add the host to the
// known-hosts.json in rootConfigPath.
func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
    tlsConfig := newTLSConfig()

    trustKeyPath := filepath.Join(rootConfigPath, "key.json")
    knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")

    u, err := url.Parse(dockerUrl)
    if err != nil {
        return nil, fmt.Errorf("unable to parse machine url")
    }

    if u.Scheme == "unix" {
        return nil, nil
    }

    addr := u.Host
    proto := "tcp"

    trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
    if err != nil {
        return nil, fmt.Errorf("unable to load trust key: %s", err)
    }

    knownHosts, err := LoadKeySetFile(knownHostsPath)
    if err != nil {
        return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
    }

    allowedHosts, err := FilterByHosts(knownHosts, addr, false)
    if err != nil {
        return nil, fmt.Errorf("error filtering hosts: %s", err)
    }

    certPool, err := GenerateCACertPool(trustKey, allowedHosts)
    if err != nil {
        return nil, fmt.Errorf("Could not create CA pool: %s", err)
    }

    tlsConfig.ServerName = serverName
    tlsConfig.RootCAs = certPool

    x509Cert, err := GenerateSelfSignedClientCert(trustKey)
    if err != nil {
        return nil, fmt.Errorf("certificate generation error: %s", err)
    }

    tlsConfig.Certificates = []tls.Certificate{{
        Certificate: [][]byte{x509Cert.Raw},
        PrivateKey:  trustKey.CryptoPrivateKey(),
        Leaf:        x509Cert,
    }}

    tlsConfig.InsecureSkipVerify = true

    testConn, err := tls.Dial(proto, addr, tlsConfig)
    if err != nil {
        return nil, fmt.Errorf("tls Handshake error: %s", err)
    }

    opts := x509.VerifyOptions{
        Roots:         tlsConfig.RootCAs,
        CurrentTime:   time.Now(),
        DNSName:       tlsConfig.ServerName,
        Intermediates: x509.NewCertPool(),
    }

    certs := testConn.ConnectionState().PeerCertificates
    for i, cert := range certs {
        if i == 0 {
            continue
        }
        opts.Intermediates.AddCert(cert)
    }

    if _, err := certs[0].Verify(opts); err != nil {
        if _, ok := err.(x509.UnknownAuthorityError); ok {
            if trustUnknownHosts {
                pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
                if err != nil {
                    return nil, fmt.Errorf("error extracting public key from cert: %s", err)
                }

                pubKey.AddExtendedField("hosts", []string{addr})

                if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
                    return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
                }
            } else {
                return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
            }
        }
    }

    testConn.Close()
    tlsConfig.InsecureSkipVerify = false

    return tlsConfig, nil
}

// joseBase64UrlEncode encodes the given data using the standard base64 url
// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
    return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
    s = strings.Replace(s, "\n", "", -1)
    s = strings.Replace(s, " ", "", -1)
    switch len(s) % 4 {
    case 0:
    case 2:
        s += "=="
    case 3:
        s += "="
    default:
        return nil, errors.New("illegal base64url string")
    }
    return base64.URLEncoding.DecodeString(s)
}

func keyIDEncode(b []byte) string {
    s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
    var buf bytes.Buffer
    var i int
    for i = 0; i < len(s)/4-1; i++ {
        start := i * 4
        end := start + 4
        buf.WriteString(s[start:end] + ":")
    }
    buf.WriteString(s[i*4:])
    return buf.String()
}

func keyIDFromCryptoKey(pubKey PublicKey) string {
    // Generate and return a 'libtrust' fingerprint of the public key.
    // For an RSA key this should be:
    //   SHA256(DER encoded ASN1)
    // Then truncated to 240 bits and encoded into 12 base32 groups like so:
    //   ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
    derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
    if err != nil {
        return ""
    }
    hasher := crypto.SHA256.New()
    hasher.Write(derBytes)
    return keyIDEncode(hasher.Sum(nil)[:30])
}

func stringFromMap(m map[string]interface{}, key string) (string, error) {
    val, ok := m[key]
    if !ok {
        return "", fmt.Errorf("%q value not specified", key)
    }

    str, ok := val.(string)
    if !ok {
        return "", fmt.Errorf("%q value must be a string", key)
    }
    delete(m, key)

    return str, nil
}

func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
    curveByteLen := (curve.Params().BitSize + 7) >> 3

    cBytes, err := joseBase64UrlDecode(cB64Url)
    if err != nil {
        return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
    }
    cByteLength := len(cBytes)
    if cByteLength != curveByteLen {
        return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
    }
    return new(big.Int).SetBytes(cBytes), nil
}

func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
    dBytes, err := joseBase64UrlDecode(dB64Url)
    if err != nil {
        return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
    }

    // The length of this octet string MUST be ceiling(log-base-2(n)/8)
    // octets (where n is the order of the curve). This is because the private
    // key d must be in the interval [1, n-1] so the bitlength of d should be
    // no larger than the bitlength of n-1. The easiest way to find the octet
    // length is to take bitlength(n-1), add 7 to force a carry, and shift this
    // bit sequence right by 3, which is essentially dividing by 8 and adding
    // 1 if there is any remainder. Thus, the private key value d should be
    // output to (bitlength(n-1)+7)>>3 octets.
    n := curve.Params().N
    octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
    dByteLength := len(dBytes)

    if dByteLength != octetLength {
        return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
    }

    return new(big.Int).SetBytes(dBytes), nil
}

func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
    nBytes, err := joseBase64UrlDecode(nB64Url)
    if err != nil {
        return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
    }

    return new(big.Int).SetBytes(nBytes), nil
}

func serializeRSAPublicExponentParam(e int) []byte {
    // We MUST use the minimum number of octets to represent E.
    // E is supposed to be 65537 for performance and security reasons
    // and is what golang's rsa package generates, but it might be
    // different if imported from some other generator.
    buf := make([]byte, 4)
    binary.BigEndian.PutUint32(buf, uint32(e))
    var i int
    for i = 0; i < 8; i++ {
        if buf[i] != 0 {
            break
        }
    }
    return buf[i:]
}

func parseRSAPublicExponentParam(eB64Url string) (int, error) {
    eBytes, err := joseBase64UrlDecode(eB64Url)
    if err != nil {
        return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
    }
    // Only the minimum number of bytes were used to represent E, but
    // binary.BigEndian.Uint32 expects at least 4 bytes, so we need
    // to add zero padding if necessary.
    byteLen := len(eBytes)
    buf := make([]byte, 4-byteLen, 4)
    eBytes = append(buf, eBytes...)

    return int(binary.BigEndian.Uint32(eBytes)), nil
}

func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
    b64Url, err := stringFromMap(m, key)
    if err != nil {
        return nil, err
    }

    paramBytes, err := joseBase64UrlDecode(b64Url)
    if err != nil {
        return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
    }

    return new(big.Int).SetBytes(paramBytes), nil
}

func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
    pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
    for k, v := range headers {
        switch val := v.(type) {
        case string:
            pemBlock.Headers[k] = val
        case []string:
            if k == "hosts" {
                pemBlock.Headers[k] = strings.Join(val, ",")
            } else {
                // Return error, non-encodable type
            }
        default:
            // Return error, non-encodable type
        }
    }

    return pemBlock, nil
}

func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
    cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
    if err != nil {
        return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
    }

    pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
    if err != nil {
        return nil, err
    }

    addPEMHeadersToKey(pemBlock, pubKey)

    return pubKey, nil
}

func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
    for key, value := range pemBlock.Headers {
        var safeVal interface{}
        if key == "hosts" {
            safeVal = strings.Split(value, ",")
        } else {
            safeVal = value
        }
        pubKey.AddExtendedField(key, safeVal)
    }
}
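For orientation (not part of the diff): a small sketch of LoadOrCreateTrustKey from the file above; the path is an arbitrary example, and the key (plus a public-key.json alongside it) is generated only on the first call.

package main

import (
    "log"

    "github.com/docker/libtrust"
)

func main() {
    // First call generates an EC P-256 key and writes it to key.json;
    // subsequent calls load the same key from disk.
    key, err := libtrust.LoadOrCreateTrustKey("/tmp/libtrust-example/key.json")
    if err != nil {
        log.Fatal(err)
    }
    log.Println("trust key ID:", key.KeyID())
}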