shrink vendors

Signed-off-by: Antonio Murdaca <runcom@redhat.com>

Author: Antonio Murdaca
Date: 2016-01-23 09:40:50 +01:00
Parent: 16caf459e5
Commit: c6eea504d8
72 changed files with 192 additions and 9208 deletions


@@ -51,6 +51,8 @@ clean() {
)
local platforms=( linux/amd64 linux/386 )
local buildTags=( )
echo
echo -n 'collecting import graph, '


@@ -6,22 +6,12 @@ rm -rf vendor/
source 'hack/.vendor-helpers.sh'
clone git github.com/codegangsta/cli c31a7975863e7810c92e2e288a9ab074f9a88f29
clone git github.com/Azure/go-ansiterm 70b2c90b260171e829f1ebd7c17f600c11858dbe
#clone git github.com/Azure/go-ansiterm 70b2c90b260171e829f1ebd7c17f600c11858dbe
clone git github.com/Sirupsen/logrus v0.8.7 # logrus is a common dependency among multiple deps
clone git github.com/docker/docker v1.10.0-rc1
clone git github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
clone git github.com/gorilla/context 14f550f51a
clone git github.com/gorilla/mux e444e69cbd
clone git github.com/docker/docker 7be8f7264435db8359ec9fd18362391bad1ca4d7
clone git golang.org/x/net 47990a1ba55743e6ef1affd3a14e5bac8553615d https://github.com/golang/net.git
clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
clone git github.com/docker/go-connections v0.1.2
clone git github.com/docker/engine-api v0.2.2
# get graph and distribution packages
clone git github.com/docker/distribution 47a064d4195a9b56133891bbb13620c3ac83a827
clone git github.com/vbatts/tar-split v0.9.11
clone git github.com/opencontainers/runc 47e3f834d73e76bc2a6a585b48d2a93325b34979 # libcontainer
clean


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -18,7 +18,7 @@ import (
// Common constants for daemon and client.
const (
// Version of Current REST API
DefaultVersion version.Version = "1.22"
DefaultVersion version.Version = "1.23"
// MinVersion represents Minimum REST API version supported
MinVersion version.Version = "1.12"


@@ -18,6 +18,8 @@ const (
FsMagicExtfs = FsMagic(0x0000EF53)
// FsMagicF2fs filesystem id for F2fs
FsMagicF2fs = FsMagic(0xF2F52010)
// FsMagicGPFS filesystem id for GPFS
FsMagicGPFS = FsMagic(0x47504653)
// FsMagicJffs2Fs filesystem if for Jffs2Fs
FsMagicJffs2Fs = FsMagic(0x000072b6)
// FsMagicJfs filesystem id for Jfs
@@ -60,6 +62,7 @@ var (
FsMagicCramfs: "cramfs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",
FsMagicJffs2Fs: "jffs2",
FsMagicJfs: "jfs",
FsMagicNfsFs: "nfs",
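
The GPFS entries added above follow the graphdriver package's existing pattern: a superblock magic constant plus a magic-to-name lookup. Below is a minimal, hypothetical Linux-only sketch (not the vendored driver code) of how such a magic value is typically obtained with statfs and mapped to a name; the helper name backingFs and the small lookup table are illustrative only.

```go
// A minimal, hypothetical Linux-only sketch (not the vendored graphdriver
// code): statfs a path and map the reported filesystem magic to a name.
package main

import (
	"fmt"
	"syscall"
)

const fsMagicGPFS = 0x47504653 // the constant added in the hunk above

// a tiny stand-in for the full FsMagic -> name table
var fsNames = map[int64]string{
	fsMagicGPFS: "gpfs",
	0x0000EF53:  "extfs",
	0xF2F52010:  "f2fs",
}

func backingFs(path string) (string, error) {
	var buf syscall.Statfs_t
	if err := syscall.Statfs(path, &buf); err != nil {
		return "", err
	}
	if name, ok := fsNames[int64(buf.Type)]; ok {
		return name, nil
	}
	return fmt.Sprintf("unknown (0x%x)", buf.Type), nil
}

func main() {
	name, err := backingFs("/")
	if err != nil {
		fmt.Println("statfs failed:", err)
		return
	}
	fmt.Println("backing filesystem:", name)
}
```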


@@ -3,7 +3,6 @@ package distribution
import (
"fmt"
"os"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api"
@@ -97,13 +96,12 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
}
var (
// use a slice to append the error strings and return a joined string to caller
errors []string
lastErr error
// discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport
// By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in errors.
// By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in lastErr.
// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
// any subsequent ErrNoSupport errors in errors.
// any subsequent ErrNoSupport errors in lastErr.
// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
// error is the ones from v2 endpoints not v1.
@@ -123,7 +121,7 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
if err != nil {
errors = append(errors, err.Error())
lastErr = err
continue
}
if err := puller.Pull(ctx, ref); err != nil {
@@ -144,34 +142,28 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo
// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
discardNoSupportErrors = true
// append subsequent errors
errors = append(errors, err.Error())
lastErr = err
} else if !discardNoSupportErrors {
// Save the ErrNoSupport error, because it's either the first error or all encountered errors
// were also ErrNoSupport errors.
// append subsequent errors
errors = append(errors, err.Error())
lastErr = err
}
continue
}
errors = append(errors, err.Error())
logrus.Debugf("Not continuing with error: %v", fmt.Errorf(strings.Join(errors, "\n")))
if len(errors) > 0 {
return fmt.Errorf(strings.Join(errors, "\n"))
}
logrus.Debugf("Not continuing with error: %v", err)
return err
}
imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull")
return nil
}
if len(errors) == 0 {
return fmt.Errorf("no endpoints found for %s", ref.String())
if lastErr == nil {
lastErr = fmt.Errorf("no endpoints found for %s", ref.String())
}
if len(errors) > 0 {
return fmt.Errorf(strings.Join(errors, "\n"))
}
return nil
return lastErr
}
// writeStatus writes a status message to out. If layersDownloaded is true, the
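
The hunk above changes how per-endpoint pull failures are surfaced: a single lastErr versus a slice of error strings joined into one message. The following is a small, self-contained sketch of the collect-and-join variant with hypothetical names (tryEndpoints, pull); it is not the vendored Pull function.

```go
// A self-contained sketch of the collect-and-join error pattern, with
// hypothetical names (tryEndpoints, pull); not the vendored Pull function.
package main

import (
	"errors"
	"fmt"
	"strings"
)

// tryEndpoints attempts each endpoint in order and returns nil on the first
// success; otherwise every per-endpoint error is joined into one message.
func tryEndpoints(endpoints []string, pull func(string) error) error {
	var errs []string
	for _, ep := range endpoints {
		if err := pull(ep); err != nil {
			errs = append(errs, fmt.Sprintf("%s: %v", ep, err))
			continue
		}
		return nil
	}
	if len(errs) == 0 {
		return errors.New("no endpoints found")
	}
	return errors.New(strings.Join(errs, "\n"))
}

func main() {
	err := tryEndpoints([]string{"v2.example.com", "v1.example.com"}, func(ep string) error {
		return errors.New("manifest unknown")
	})
	fmt.Println(err)
}
```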


@@ -311,6 +311,8 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
case distribution.ErrBlobMounted:
progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
err.Descriptor.MediaType = schema2.MediaTypeLayer
pd.pushState.Lock()
pd.pushState.confirmedV2 = true
pd.pushState.remoteLayers[diffID] = err.Descriptor


@@ -6,6 +6,7 @@ import (
"net/http"
"net/url"
"strings"
"syscall"
"time"
"github.com/docker/distribution"
@@ -145,8 +146,14 @@ func retryOnError(err error) error {
case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied:
return xfer.DoNotRetry{Err: err}
}
case *url.Error:
return retryOnError(v.Err)
case *client.UnexpectedHTTPResponseError:
return xfer.DoNotRetry{Err: err}
case error:
if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
return xfer.DoNotRetry{Err: err}
}
}
// let's be nice and fallback if the error is a completely
// unexpected one.
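
The new cases above extend an error-classification type switch: unwrap *url.Error, never retry unexpected HTTP responses, and treat a full disk (ENOSPC) as fatal. A stripped-down, hypothetical sketch of that pattern follows, using a stand-in doNotRetry wrapper instead of docker's xfer.DoNotRetry and omitting the registry client types.

```go
// A stripped-down, hypothetical sketch of the classification pattern: a
// stand-in doNotRetry wrapper replaces xfer.DoNotRetry and the registry
// client types are omitted.
package main

import (
	"fmt"
	"net/url"
	"strings"
	"syscall"
)

type doNotRetry struct{ err error }

func (d doNotRetry) Error() string { return d.err.Error() }

// classify decides whether an error is worth retrying.
func classify(err error) error {
	switch v := err.(type) {
	case *url.Error:
		// unwrap and classify the underlying transport error
		return classify(v.Err)
	case error:
		// a full disk (ENOSPC) is never worth retrying
		if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
			return doNotRetry{err: err}
		}
	}
	// anything else is treated as retryable
	return err
}

func main() {
	err := classify(&url.Error{Op: "Get", URL: "https://registry.example/v2/", Err: syscall.ENOSPC})
	_, fatal := err.(doNotRetry)
	fmt.Println("fatal:", fatal)
}
```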


@@ -10,6 +10,7 @@ import (
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
@@ -154,7 +155,7 @@ func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
return "", err
}
dgst, err := digest.ParseDigest(string(content))
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
@@ -168,7 +169,7 @@ func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
return "", err
}
dgst, err := digest.ParseDigest(string(content))
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
@@ -177,16 +178,17 @@ func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
}
func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
if err != nil {
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(string(content)) {
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid cache id value")
}
return string(content), nil
return content, nil
}
func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
@@ -227,32 +229,34 @@ func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error
}
func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
content, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
if err != nil {
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(string(content)) {
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid mount id value")
}
return string(content), nil
return content, nil
}
func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
content, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
content := strings.TrimSpace(string(contentBytes))
if !stringIDRegexp.MatchString(string(content)) {
if !stringIDRegexp.MatchString(content) {
return "", errors.New("invalid init id value")
}
return string(content), nil
return content, nil
}
func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
@@ -264,7 +268,7 @@ func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
return "", err
}
dgst, err := digest.ParseDigest(string(content))
dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
if err != nil {
return "", err
}
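
The TrimSpace calls added throughout this file all guard against the same thing: metadata files written with a trailing newline would otherwise fail strict parsing. A tiny, hypothetical standard-library illustration of the failure mode (no digest package involved):

```go
// A tiny, hypothetical illustration (standard library only, no digest
// package) of the failure mode the TrimSpace calls guard against.
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	content := "0123456789abcdef\n" // as ReadFile returns it, trailing newline included

	_, err := hex.DecodeString(content)
	fmt.Println("without TrimSpace:", err) // strict parsing rejects the newline

	_, err = hex.DecodeString(strings.TrimSpace(content))
	fmt.Println("with TrimSpace:   ", err) // <nil>
}
```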


@@ -86,6 +86,7 @@ func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (St
l, err := ls.loadLayer(id)
if err != nil {
logrus.Debugf("Failed to load layer %s: %s", id, err)
continue
}
if l.parent != nil {
l.parent.referenceCount++
@@ -109,22 +110,22 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
diff, err := ls.store.GetDiffID(layer)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err)
}
size, err := ls.store.GetSize(layer)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get size for %s: %s", layer, err)
}
cacheID, err := ls.store.GetCacheID(layer)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err)
}
parent, err := ls.store.GetParent(layer)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
}
cl = &roLayer{


@@ -1163,7 +1163,7 @@ func (fs *FlagSet) ReportError(str string, withHelp bool) {
str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
}
}
fmt.Fprintf(fs.Out(), "docker: %s.\n", str)
fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str)
}
// Parsed reports whether fs.Parse has been called.
@@ -1223,11 +1223,27 @@ func (v mergeVal) IsBoolFlag() bool {
return false
}
// Name returns the name of a mergeVal.
// If the original value had a name, return the original name,
// otherwise, return the key assigned to this mergeVal.
func (v mergeVal) Name() string {
type namedValue interface {
Name() string
}
if nVal, ok := v.Value.(namedValue); ok {
return nVal.Name()
}
return v.key
}
// Merge is a helper function that merges n FlagSets into a single dest FlagSet.
// In case of name collision between the flagsets it will apply
// the destination FlagSet's errorHandling behavior.
func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
for _, fset := range flagsets {
if fset.formal == nil {
continue
}
for k, f := range fset.formal {
if _, ok := dest.formal[k]; ok {
var err error
@@ -1249,6 +1265,9 @@ func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
}
newF := *f
newF.Value = mergeVal{f.Value, k, fset}
if dest.formal == nil {
dest.formal = make(map[string]*Flag)
}
dest.formal[k] = &newF
}
}
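
Merge and the mergeVal.Name helper added above let several flag sets be combined into one while surfacing name collisions. As a rough illustration of the same idea, here is a self-contained sketch against the standard library flag package rather than the vendored mflag fork; mergeFlagSets and the flag names are hypothetical.

```go
// A self-contained sketch of the merge-with-collision-check idea using the
// standard library flag package rather than the vendored mflag fork;
// mergeFlagSets and the flag names are illustrative only.
package main

import (
	"flag"
	"fmt"
)

// mergeFlagSets copies every flag from each source set into dest, reporting a
// collision instead of silently overwriting an existing definition.
func mergeFlagSets(dest *flag.FlagSet, sets ...*flag.FlagSet) error {
	var err error
	for _, src := range sets {
		src.VisitAll(func(f *flag.Flag) {
			if err != nil {
				return
			}
			if dest.Lookup(f.Name) != nil {
				err = fmt.Errorf("flag redefined: %s", f.Name)
				return
			}
			dest.Var(f.Value, f.Name, f.Usage)
		})
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	a := flag.NewFlagSet("daemon", flag.ContinueOnError)
	a.Bool("debug", false, "enable debug output")

	b := flag.NewFlagSet("common", flag.ContinueOnError)
	b.String("log-level", "info", "logging level")

	merged := flag.NewFlagSet("merged", flag.ContinueOnError)
	if err := mergeFlagSets(merged, a, b); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	_ = merged.Parse([]string{"--debug", "--log-level=debug"})
	fmt.Println("log-level =", merged.Lookup("log-level").Value.String())
}
```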


@@ -3,21 +3,20 @@
package term
import (
"fmt"
"io"
"os"
"os/signal"
"syscall"
"github.com/Azure/go-ansiterm/winterm"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/term/windows"
)
// State holds the console mode for the terminal.
type State struct {
mode uint32
inMode, outMode uint32
inHandle, outHandle syscall.Handle
}
// Winsize is used for window size.
@@ -28,6 +27,15 @@ type Winsize struct {
y uint16
}
const (
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
enableVirtualTerminalInput = 0x0200
enableVirtualTerminalProcessing = 0x0004
)
// usingNativeConsole is true if we are using the Windows native console
var usingNativeConsole bool
// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
switch {
@@ -39,6 +47,7 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
return windows.ConsoleStreams()
default:
if useNativeConsole() {
usingNativeConsole = true
return os.Stdin, os.Stdout, os.Stderr
}
return windows.ConsoleStreams()
@@ -54,7 +63,7 @@ func useNativeConsole() bool {
return false
}
// Native console is not available major version 10
// Native console is not available before major version 10
if osv.MajorVersion < 10 {
return false
}
@@ -64,6 +73,17 @@ func useNativeConsole() bool {
return false
}
// Get the console modes. If this fails, we can't use the native console
state, err := getNativeConsole()
if err != nil {
return false
}
// Probe the console to see if it can be enabled.
if nil != probeNativeConsole(state) {
return false
}
// Environment variable override
if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" {
if e == "1" {
@@ -72,32 +92,86 @@ func useNativeConsole() bool {
return false
}
// Get the handle to stdout
stdOutHandle, err := syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE)
if err != nil {
return false
}
// Get the console mode from the consoles stdout handle
var mode uint32
if err := syscall.GetConsoleMode(stdOutHandle, &mode); err != nil {
return false
}
// Legacy mode does not have native ANSI emulation.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
const enableVirtualTerminalProcessing = 0x0004
if mode&enableVirtualTerminalProcessing == 0 {
return false
}
// TODO Windows (Post TP4). The native emulator still has issues which
// TODO Windows. The native emulator still has issues which
// mean it shouldn't be enabled for everyone. Change this next line to true
// to change the default to "enable if available". In the meantime, users
// can still try it out by using USE_NATIVE_CONSOLE env variable.
return false
}
// getNativeConsole returns the console modes ('state') for the native Windows console
func getNativeConsole() (State, error) {
var (
err error
state State
)
// Get the handle to stdout
if state.outHandle, err = syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE); err != nil {
return state, err
}
// Get the console mode from the consoles stdout handle
if err = syscall.GetConsoleMode(state.outHandle, &state.outMode); err != nil {
return state, err
}
// Get the handle to stdin
if state.inHandle, err = syscall.GetStdHandle(syscall.STD_INPUT_HANDLE); err != nil {
return state, err
}
// Get the console mode from the consoles stdin handle
if err = syscall.GetConsoleMode(state.inHandle, &state.inMode); err != nil {
return state, err
}
return state, nil
}
// probeNativeConsole probes the console to determine if native console mode can be supported.
func probeNativeConsole(state State) error {
if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil {
return err
}
defer winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode)
if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil {
return err
}
defer winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode)
return nil
}
// enableNativeConsole turns on native console mode
func enableNativeConsole(state State) error {
if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil {
return err
}
if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil {
winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) // restore out if we can
return err
}
return nil
}
// disableNativeConsole turns off native console mode
func disableNativeConsole(state *State) error {
// Try to restore both in and out before error checking.
errout := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode)
errin := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode)
if errout != nil {
return errout
}
if errin != nil {
return errin
}
return nil
}
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
return windows.GetHandleInfo(in)
@@ -105,7 +179,6 @@ func GetFdInfo(in interface{}) (uintptr, bool) {
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
info, err := winterm.GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil, err
@@ -117,58 +190,9 @@ func GetWinsize(fd uintptr) (*Winsize, error) {
x: 0,
y: 0}
// Note: GetWinsize is called frequently -- uncomment only for excessive details
// logrus.Debugf("[windows] GetWinsize: Console(%v)", info.String())
// logrus.Debugf("[windows] GetWinsize: Width(%v), Height(%v), x(%v), y(%v)", winsize.Width, winsize.Height, winsize.x, winsize.y)
return winsize, nil
}
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
// Ensure the requested dimensions are no larger than the maximum window size
info, err := winterm.GetConsoleScreenBufferInfo(fd)
if err != nil {
return err
}
if ws.Width == 0 || ws.Height == 0 || ws.Width > uint16(info.MaximumWindowSize.X) || ws.Height > uint16(info.MaximumWindowSize.Y) {
return fmt.Errorf("Illegal window size: (%v,%v) -- Maximum allow: (%v,%v)",
ws.Width, ws.Height, info.MaximumWindowSize.X, info.MaximumWindowSize.Y)
}
// Narrow the sizes to that used by Windows
width := winterm.SHORT(ws.Width)
height := winterm.SHORT(ws.Height)
// Set the dimensions while ensuring they remain within the bounds of the backing console buffer
// -- Shrinking will always succeed. Growing may push the edges past the buffer boundary. When that occurs,
// shift the upper left just enough to keep the new window within the buffer.
rect := info.Window
if width < rect.Right-rect.Left+1 {
rect.Right = rect.Left + width - 1
} else if width > rect.Right-rect.Left+1 {
rect.Right = rect.Left + width - 1
if rect.Right >= info.Size.X {
rect.Left = info.Size.X - width
rect.Right = info.Size.X - 1
}
}
if height < rect.Bottom-rect.Top+1 {
rect.Bottom = rect.Top + height - 1
} else if height > rect.Bottom-rect.Top+1 {
rect.Bottom = rect.Top + height - 1
if rect.Bottom >= info.Size.Y {
rect.Top = info.Size.Y - height
rect.Bottom = info.Size.Y - 1
}
}
logrus.Debugf("[windows] SetWinsize: Requested((%v,%v)) Actual(%v)", ws.Width, ws.Height, rect)
return winterm.SetConsoleWindowInfo(fd, true, rect)
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
return windows.IsConsole(fd)
@@ -177,25 +201,36 @@ func IsTerminal(fd uintptr) bool {
// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
return winterm.SetConsoleMode(fd, state.mode)
if usingNativeConsole {
return disableNativeConsole(state)
}
return winterm.SetConsoleMode(fd, state.outMode)
}
// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
if usingNativeConsole {
state, err := getNativeConsole()
if err != nil {
return nil, err
}
return &state, nil
}
mode, e := winterm.GetConsoleMode(fd)
if e != nil {
return nil, e
}
return &State{mode}, nil
return &State{outMode: mode}, nil
}
// DisableEcho disables echo for the terminal connected to the given file descriptor.
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
func DisableEcho(fd uintptr, state *State) error {
mode := state.mode
mode := state.inMode
mode &^= winterm.ENABLE_ECHO_INPUT
mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
err := winterm.SetConsoleMode(fd, mode)
if err != nil {
return err
@@ -227,10 +262,17 @@ func MakeRaw(fd uintptr) (*State, error) {
return nil, err
}
mode := state.inMode
if usingNativeConsole {
if err := enableNativeConsole(*state); err != nil {
return nil, err
}
mode |= enableVirtualTerminalInput
}
// See
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
mode := state.mode
// Disable these modes
mode &^= winterm.ENABLE_ECHO_INPUT


@@ -109,7 +109,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
keyName := certName[:len(certName)-5] + ".key"
logrus.Debugf("cert: %s", filepath.Join(directory, f.Name()))
if !hasFile(fs, keyName) {
return fmt.Errorf("Missing key %s for certificate %s", keyName, certName)
return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName)
}
cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName))
if err != nil {
@@ -122,7 +122,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
certName := keyName[:len(keyName)-4] + ".cert"
logrus.Debugf("key: %s", filepath.Join(directory, f.Name()))
if !hasFile(fs, certName) {
return fmt.Errorf("Missing certificate %s for key %s", certName, keyName)
return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName)
}
}
}
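
The reworded errors above encode the directory convention: client certificates end in .cert with a matching .key, while CA certificates use .crt. A hypothetical stand-alone sketch of that pairing check follows (not the vendored ReadCertsDirectory); the directory path in main is only an example.

```go
// A hypothetical stand-alone sketch of the pairing check that the reworded
// errors describe (not the vendored ReadCertsDirectory): every *.cert client
// certificate must have a matching *.key, while CA certificates use *.crt.
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func checkCertDir(dir string) error {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	names := map[string]bool{}
	for _, e := range entries {
		names[e.Name()] = true
	}
	for name := range names {
		if strings.HasSuffix(name, ".cert") {
			key := strings.TrimSuffix(name, ".cert") + ".key"
			if !names[key] {
				return fmt.Errorf("missing key %s for client certificate %s (CA certificates should use the extension .crt)", key, name)
			}
		}
	}
	return nil
}

func main() {
	// example path only
	if err := checkCertDir("/etc/docker/certs.d/registry.example.com"); err != nil {
		fmt.Println(err)
	}
}
```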


@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,223 +0,0 @@
// Package nat is a convenience package for manipulation of strings describing network ports.
package nat
import (
"fmt"
"net"
"strconv"
"strings"
)
const (
// portSpecTemplate is the expected format for port specifications
portSpecTemplate = "ip:hostPort:containerPort"
)
// PortBinding represents a binding between a Host IP address and a Host Port
type PortBinding struct {
// HostIP is the host IP Address
HostIP string `json:"HostIp"`
// HostPort is the host port number
HostPort string
}
// PortMap is a collection of PortBinding indexed by Port
type PortMap map[Port][]PortBinding
// PortSet is a collection of structs indexed by Port
type PortSet map[Port]struct{}
// Port is a string containing port number and protocol in the format "80/tcp"
type Port string
// NewPort creates a new instance of a Port given a protocol and port number or port range
func NewPort(proto, port string) (Port, error) {
// Check for parsing issues on "port" now so we can avoid having
// to check it later on.
portStartInt, portEndInt, err := ParsePortRangeToInt(port)
if err != nil {
return "", err
}
if portStartInt == portEndInt {
return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
}
return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
}
// ParsePort parses the port number string and returns an int
func ParsePort(rawPort string) (int, error) {
if len(rawPort) == 0 {
return 0, nil
}
port, err := strconv.ParseUint(rawPort, 10, 16)
if err != nil {
return 0, err
}
return int(port), nil
}
// ParsePortRangeToInt parses the port range string and returns start/end ints
func ParsePortRangeToInt(rawPort string) (int, int, error) {
if len(rawPort) == 0 {
return 0, 0, nil
}
start, end, err := ParsePortRange(rawPort)
if err != nil {
return 0, 0, err
}
return int(start), int(end), nil
}
// Proto returns the protocol of a Port
func (p Port) Proto() string {
proto, _ := SplitProtoPort(string(p))
return proto
}
// Port returns the port number of a Port
func (p Port) Port() string {
_, port := SplitProtoPort(string(p))
return port
}
// Int returns the port number of a Port as an int
func (p Port) Int() int {
portStr := p.Port()
if len(portStr) == 0 {
return 0
}
// We don't need to check for an error because we're going to
// assume that any error would have been found, and reported, in NewPort()
port, _ := strconv.ParseUint(portStr, 10, 16)
return int(port)
}
// Range returns the start/end port numbers of a Port range as ints
func (p Port) Range() (int, int, error) {
return ParsePortRangeToInt(p.Port())
}
// SplitProtoPort splits a port in the format of proto/port
func SplitProtoPort(rawPort string) (string, string) {
parts := strings.Split(rawPort, "/")
l := len(parts)
if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
return "", ""
}
if l == 1 {
return "tcp", rawPort
}
if len(parts[1]) == 0 {
return "tcp", parts[0]
}
return parts[1], parts[0]
}
func validateProto(proto string) bool {
for _, availableProto := range []string{"tcp", "udp"} {
if availableProto == proto {
return true
}
}
return false
}
// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
// these in to the internal types
func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
var (
exposedPorts = make(map[Port]struct{}, len(ports))
bindings = make(map[Port][]PortBinding)
)
for _, rawPort := range ports {
proto := "tcp"
if i := strings.LastIndex(rawPort, "/"); i != -1 {
proto = rawPort[i+1:]
rawPort = rawPort[:i]
}
if !strings.Contains(rawPort, ":") {
rawPort = fmt.Sprintf("::%s", rawPort)
} else if len(strings.Split(rawPort, ":")) == 2 {
rawPort = fmt.Sprintf(":%s", rawPort)
}
parts, err := PartParser(portSpecTemplate, rawPort)
if err != nil {
return nil, nil, err
}
var (
containerPort = parts["containerPort"]
rawIP = parts["ip"]
hostPort = parts["hostPort"]
)
if rawIP != "" && net.ParseIP(rawIP) == nil {
return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP)
}
if containerPort == "" {
return nil, nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
}
startPort, endPort, err := ParsePortRange(containerPort)
if err != nil {
return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
}
var startHostPort, endHostPort uint64 = 0, 0
if len(hostPort) > 0 {
startHostPort, endHostPort, err = ParsePortRange(hostPort)
if err != nil {
return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
}
}
if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
// Allow host port range iff containerPort is not a range.
// In this case, use the host port range as the dynamic
// host port range to allocate into.
if endPort != startPort {
return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
}
}
if !validateProto(strings.ToLower(proto)) {
return nil, nil, fmt.Errorf("Invalid proto: %s", proto)
}
for i := uint64(0); i <= (endPort - startPort); i++ {
containerPort = strconv.FormatUint(startPort+i, 10)
if len(hostPort) > 0 {
hostPort = strconv.FormatUint(startHostPort+i, 10)
}
// Set hostPort to a range only if there is a single container port
// and a dynamic host port.
if startPort == endPort && startHostPort != endHostPort {
hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
}
port, err := NewPort(strings.ToLower(proto), containerPort)
if err != nil {
return nil, nil, err
}
if _, exists := exposedPorts[port]; !exists {
exposedPorts[port] = struct{}{}
}
binding := PortBinding{
HostIP: rawIP,
HostPort: hostPort,
}
bslice, exists := bindings[port]
if !exists {
bslice = []PortBinding{}
}
bindings[port] = append(bslice, binding)
}
}
return exposedPorts, bindings, nil
}


@@ -1,56 +0,0 @@
package nat
import (
"fmt"
"strconv"
"strings"
)
// PartParser parses and validates the specified string (data) using the specified template
// e.g. ip:public:private -> 192.168.0.1:80:8000
func PartParser(template, data string) (map[string]string, error) {
// ip:public:private
var (
templateParts = strings.Split(template, ":")
parts = strings.Split(data, ":")
out = make(map[string]string, len(templateParts))
)
if len(parts) != len(templateParts) {
return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
}
for i, t := range templateParts {
value := ""
if len(parts) > i {
value = parts[i]
}
out[t] = value
}
return out, nil
}
// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
func ParsePortRange(ports string) (uint64, uint64, error) {
if ports == "" {
return 0, 0, fmt.Errorf("Empty string specified for ports.")
}
if !strings.Contains(ports, "-") {
start, err := strconv.ParseUint(ports, 10, 16)
end := start
return start, end, err
}
parts := strings.Split(ports, "-")
start, err := strconv.ParseUint(parts[0], 10, 16)
if err != nil {
return 0, 0, err
}
end, err := strconv.ParseUint(parts[1], 10, 16)
if err != nil {
return 0, 0, err
}
if end < start {
return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
}
return start, end, nil
}


@@ -1,96 +0,0 @@
package nat
import (
"sort"
"strings"
)
type portSorter struct {
ports []Port
by func(i, j Port) bool
}
func (s *portSorter) Len() int {
return len(s.ports)
}
func (s *portSorter) Swap(i, j int) {
s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
}
func (s *portSorter) Less(i, j int) bool {
ip := s.ports[i]
jp := s.ports[j]
return s.by(ip, jp)
}
// Sort sorts a list of ports using the provided predicate
// This function should compare `i` and `j`, returning true if `i` is
// considered to be less than `j`
func Sort(ports []Port, predicate func(i, j Port) bool) {
s := &portSorter{ports, predicate}
sort.Sort(s)
}
type portMapEntry struct {
port Port
binding PortBinding
}
type portMapSorter []portMapEntry
func (s portMapSorter) Len() int { return len(s) }
func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// sort the port so that the order is:
// 1. port with larger specified bindings
// 2. larger port
// 3. port with tcp protocol
func (s portMapSorter) Less(i, j int) bool {
pi, pj := s[i].port, s[j].port
hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
}
// SortPortMap sorts the list of ports and their respective mappings. Ports
// with an explicit HostPort will be placed first.
func SortPortMap(ports []Port, bindings PortMap) {
s := portMapSorter{}
for _, p := range ports {
if binding, ok := bindings[p]; ok {
for _, b := range binding {
s = append(s, portMapEntry{port: p, binding: b})
}
bindings[p] = []PortBinding{}
} else {
s = append(s, portMapEntry{port: p})
}
}
sort.Sort(s)
var (
i int
pm = make(map[Port]struct{})
)
// reorder ports
for _, entry := range s {
if _, ok := pm[entry.port]; !ok {
ports[i] = entry.port
pm[entry.port] = struct{}{}
i++
}
// reorder bindings for this port
if _, ok := bindings[entry.port]; ok {
bindings[entry.port] = append(bindings[entry.port], entry.binding)
}
}
}
func toInt(s string) uint64 {
i, _, err := ParsePortRange(s)
if err != nil {
i = 0
}
return i
}


@@ -1,133 +0,0 @@
// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
//
// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
// A Config may be reused; the tls package will also not modify it.
package tlsconfig
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"github.com/Sirupsen/logrus"
)
// Options represents the information needed to create client and server TLS configurations.
type Options struct {
CAFile string
// If either CertFile or KeyFile is empty, Client() will not load them
// preventing the client from authenticating to the server.
// However, Server() requires them and will error out if they are empty.
CertFile string
KeyFile string
// client-only option
InsecureSkipVerify bool
// server-only option
ClientAuth tls.ClientAuthType
}
// Extra (server-side) accepted CBC cipher suites - will phase out in the future
var acceptedCBCCiphers = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
}
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
var clientCipherSuites = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
// options struct but wants to use a commonly accepted set of TLS cipher suites, with
// known weak algorithms removed.
var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
// ServerDefault is a secure-enough TLS configuration for the server TLS configuration.
var ServerDefault = tls.Config{
// Avoid fallback to SSL protocols < TLS1.0
MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: DefaultServerAcceptedCiphers,
}
// ClientDefault is a secure-enough TLS configuration for the client TLS configuration.
var ClientDefault = tls.Config{
// Prefer TLS1.2 as the client minimum
MinVersion: tls.VersionTLS12,
CipherSuites: clientCipherSuites,
}
// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
func certPool(caFile string) (*x509.CertPool, error) {
// If we should verify the server, we need to load a trusted ca
certPool := x509.NewCertPool()
pem, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err)
}
if !certPool.AppendCertsFromPEM(pem) {
return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
}
s := certPool.Subjects()
subjects := make([]string, len(s))
for i, subject := range s {
subjects[i] = string(subject)
}
logrus.Debugf("Trusting certs with subjects: %v", subjects)
return certPool, nil
}
// Client returns a TLS configuration meant to be used by a client.
func Client(options Options) (*tls.Config, error) {
tlsConfig := ClientDefault
tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
if !options.InsecureSkipVerify {
CAs, err := certPool(options.CAFile)
if err != nil {
return nil, err
}
tlsConfig.RootCAs = CAs
}
if options.CertFile != "" && options.KeyFile != "" {
tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
if err != nil {
return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err)
}
tlsConfig.Certificates = []tls.Certificate{tlsCert}
}
return &tlsConfig, nil
}
// Server returns a TLS configuration meant to be used by a server.
func Server(options Options) (*tls.Config, error) {
tlsConfig := ServerDefault
tlsConfig.ClientAuth = options.ClientAuth
tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
}
return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
}
tlsConfig.Certificates = []tls.Certificate{tlsCert}
if options.ClientAuth >= tls.VerifyClientCertIfGiven {
CAs, err := certPool(options.CAFile)
if err != nil {
return nil, err
}
tlsConfig.ClientCAs = CAs
}
return &tlsConfig, nil
}


@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,13 +0,0 @@
[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
# Introduction
go-units is a library to transform human friendly measurements into machine friendly values.
## Usage
See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
## License
go-units is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text.


@@ -1,11 +0,0 @@
dependencies:
post:
# install golint
- go get github.com/golang/lint/golint
test:
pre:
# run analysis before tests
- go vet ./...
- test -z "$(golint ./... | tee /dev/stderr)"
- test -z "$(gofmt -s -l . | tee /dev/stderr)"


@@ -1,33 +0,0 @@
// Package units provides helper function to parse and print size and time units
// in human-readable format.
package units
import (
"fmt"
"time"
)
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.).
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 60 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours()); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*3 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%d years", int(d.Hours())/24/365)
}

View File

@@ -1,95 +0,0 @@
package units
import (
"fmt"
"regexp"
"strconv"
"strings"
)
// See: http://en.wikipedia.org/wiki/Binary_prefix
const (
// Decimal
KB = 1000
MB = 1000 * KB
GB = 1000 * MB
TB = 1000 * GB
PB = 1000 * TB
// Binary
KiB = 1024
MiB = 1024 * KiB
GiB = 1024 * MiB
TiB = 1024 * GiB
PiB = 1024 * TiB
)
type unitMap map[string]int64
var (
decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
)
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
// CustomSize returns a human-readable approximation of a size
// using custom format.
func CustomSize(format string, size float64, base float64, _map []string) string {
i := 0
for size >= base {
size = size / base
i++
}
return fmt.Sprintf(format, size, _map[i])
}
// HumanSize returns a human-readable approximation of a size
// capped at 4 significant digits (e.g. "2.746 MB", "796 KB").
func HumanSize(size float64) string {
return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
}
// BytesSize returns a human-readable size in bytes, kibibytes,
// mebibytes, gibibytes, or tebibytes (e.g. "44 KiB", "17 MiB").
func BytesSize(size float64) string {
return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
}
// FromHumanSize returns an integer from a human-readable specification of a
// size using SI standard (eg. "44kB", "17MB").
func FromHumanSize(size string) (int64, error) {
return parseSize(size, decimalMap)
}
// RAMInBytes parses a human-readable string representing an amount of RAM
// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
// returns the number of bytes, or -1 if the string is unparseable.
// Units are case-insensitive, and the 'b' suffix is optional.
func RAMInBytes(size string) (int64, error) {
return parseSize(size, binaryMap)
}
// parseSize parses the human-readable size string into the amount it represents.
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
matches := sizeRegex.FindStringSubmatch(sizeStr)
if len(matches) != 3 {
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
}
size, err := strconv.ParseInt(matches[1], 10, 0)
if err != nil {
return -1, err
}
unitPrefix := strings.ToLower(matches[2])
if mul, ok := uMap[unitPrefix]; ok {
size *= mul
}
return size, nil
}

View File

@@ -1,109 +0,0 @@
package units
import (
"fmt"
"strconv"
"strings"
)
// Ulimit is a human friendly version of Rlimit.
type Ulimit struct {
Name string
Hard int64
Soft int64
}
// Rlimit specifies the resource limits, such as max open files.
type Rlimit struct {
Type int `json:"type,omitempty"`
Hard uint64 `json:"hard,omitempty"`
Soft uint64 `json:"soft,omitempty"`
}
const (
// Magic numbers for making the syscall.
// Some of these are defined in the syscall package, but not all.
// Also, since the Windows client doesn't get access to the syscall package,
// they need to be defined here.
rlimitAs = 9
rlimitCore = 4
rlimitCPU = 0
rlimitData = 2
rlimitFsize = 1
rlimitLocks = 10
rlimitMemlock = 8
rlimitMsgqueue = 12
rlimitNice = 13
rlimitNofile = 7
rlimitNproc = 6
rlimitRss = 5
rlimitRtprio = 14
rlimitRttime = 15
rlimitSigpending = 11
rlimitStack = 3
)
var ulimitNameMapping = map[string]int{
//"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
"core": rlimitCore,
"cpu": rlimitCPU,
"data": rlimitData,
"fsize": rlimitFsize,
"locks": rlimitLocks,
"memlock": rlimitMemlock,
"msgqueue": rlimitMsgqueue,
"nice": rlimitNice,
"nofile": rlimitNofile,
"nproc": rlimitNproc,
"rss": rlimitRss,
"rtprio": rlimitRtprio,
"rttime": rlimitRttime,
"sigpending": rlimitSigpending,
"stack": rlimitStack,
}
// ParseUlimit parses and returns a Ulimit from the specified string.
func ParseUlimit(val string) (*Ulimit, error) {
parts := strings.SplitN(val, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("invalid ulimit argument: %s", val)
}
if _, exists := ulimitNameMapping[parts[0]]; !exists {
return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
}
limitVals := strings.SplitN(parts[1], ":", 2)
if len(limitVals) > 2 {
return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
}
soft, err := strconv.ParseInt(limitVals[0], 10, 64)
if err != nil {
return nil, err
}
hard := soft // in case no hard was set
if len(limitVals) == 2 {
hard, err = strconv.ParseInt(limitVals[1], 10, 64)
if err != nil {
return nil, err
}
}
if soft > hard {
return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard)
}
return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil
}
// GetRlimit returns the RLimit corresponding to Ulimit.
func (u *Ulimit) GetRlimit() (*Rlimit, error) {
t, exists := ulimitNameMapping[u.Name]
if !exists {
return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
}
return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
}
func (u *Ulimit) String() string {
return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
}
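// exampleParseUlimit is an illustrative sketch (not part of the upstream
// file); it shows the "name=soft[:hard]" syntax accepted by ParseUlimit and
// the conversion to an Rlimit.
func exampleParseUlimit() {
	u, err := ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // nofile=1024:2048

	r, err := u.GetRlimit()
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Type, r.Soft, r.Hard) // 7 1024 2048
}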

View File

@@ -1,13 +0,0 @@
# Contributing to libtrust
Want to hack on libtrust? Awesome! Here are instructions to get you
started.
libtrust is a part of the [Docker](https://www.docker.com) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read
[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
Happy hacking!

View File

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,3 +0,0 @@
Solomon Hykes <solomon@docker.com>
Josh Hawn <josh@docker.com> (github: jlhawn)
Derek McGowan <derek@docker.com> (github: dmcgowan)

View File

@@ -1,18 +0,0 @@
# libtrust
Libtrust is a library for managing authentication and authorization using public key cryptography.
Authentication is handled using the identity attached to the public key.
Libtrust provides multiple methods to prove possession of the private key associated with an identity.
- TLS x509 certificates
- Signature verification
- Key Challenge
Authorization and access control is managed through a distributed trust graph.
Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
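
A minimal signing sketch (illustrative only; it uses the key-generation and JSON-signature helpers defined in this package):

```go
package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	// Generate an EC P-256 identity key.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	fmt.Println("key ID:", key.KeyID())

	// Sign an arbitrary JSON document with that key, then verify it.
	js, err := libtrust.NewJSONSignatureFromMap(map[string]interface{}{"name": "example"})
	if err != nil {
		panic(err)
	}
	if err := js.Sign(key); err != nil {
		panic(err)
	}
	signers, err := js.Verify()
	if err != nil {
		panic(err)
	}
	fmt.Println("verified signatures:", len(signers)) // 1
}
```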
## Copyright and license
Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
Docs released under Creative Commons.

View File

@@ -1,175 +0,0 @@
package libtrust
import (
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"net"
"time"
)
type certTemplateInfo struct {
commonName string
domains []string
ipAddresses []net.IP
isCA bool
clientAuth bool
serverAuth bool
}
func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
// Generate a certificate template which is valid from the past week to
// 10 years from now. The usage of the certificate depends on the
// specified fields in the given certTemplateInfo object.
var (
keyUsage x509.KeyUsage
extKeyUsage []x509.ExtKeyUsage
)
if info.isCA {
keyUsage = x509.KeyUsageCertSign
}
if info.clientAuth {
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
}
if info.serverAuth {
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
}
return &x509.Certificate{
SerialNumber: big.NewInt(0),
Subject: pkix.Name{
CommonName: info.commonName,
},
NotBefore: time.Now().Add(-time.Hour * 24 * 7),
NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
DNSNames: info.domains,
IPAddresses: info.ipAddresses,
IsCA: info.isCA,
KeyUsage: keyUsage,
ExtKeyUsage: extKeyUsage,
BasicConstraintsValid: info.isCA,
}
}
func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
pubCertTemplate := generateCertTemplate(subInfo)
privCertTemplate := generateCertTemplate(issInfo)
certDER, err := x509.CreateCertificate(
rand.Reader, pubCertTemplate, privCertTemplate,
pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
)
if err != nil {
return nil, fmt.Errorf("failed to create certificate: %s", err)
}
cert, err = x509.ParseCertificate(certDER)
if err != nil {
return nil, fmt.Errorf("failed to parse certificate: %s", err)
}
return
}
// GenerateSelfSignedServerCert creates a self-signed certificate for the
// given key which is to be used for TLS servers with the given domains and
// IP addresses.
func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
info := &certTemplateInfo{
commonName: key.KeyID(),
domains: domains,
ipAddresses: ipAddresses,
serverAuth: true,
}
return generateCert(key.PublicKey(), key, info, info)
}
// GenerateSelfSignedClientCert creates a self-signed certificate for the
// given key which is to be used for TLS clients.
func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
info := &certTemplateInfo{
commonName: key.KeyID(),
clientAuth: true,
}
return generateCert(key.PublicKey(), key, info, info)
}
// GenerateCACert creates a certificate which can be used as a trusted
// certificate authority.
func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
subjectInfo := &certTemplateInfo{
commonName: trustedKey.KeyID(),
isCA: true,
}
issuerInfo := &certTemplateInfo{
commonName: signer.KeyID(),
}
return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
}
// GenerateCACertPool creates a certificate authority pool to be used for a
// TLS configuration. Any self-signed certificates issued by the specified
// trusted keys will be verified during a TLS handshake.
func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, trustedKey := range trustedKeys {
cert, err := GenerateCACert(signer, trustedKey)
if err != nil {
return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
}
certPool.AddCert(cert)
}
return certPool, nil
}
// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded
// containing one or more certificates. The expected pem type is "CERTIFICATE".
func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
certificates := []*x509.Certificate{}
var block *pem.Block
block, b = pem.Decode(b)
for ; block != nil; block, b = pem.Decode(b) {
if block.Type == "CERTIFICATE" {
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, cert)
} else {
return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
}
}
return certificates, nil
}
// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded
// containing one or more certificates. The expected pem type is "CERTIFICATE".
func LoadCertificatePool(filename string) (*x509.CertPool, error) {
certs, err := LoadCertificateBundle(filename)
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
for _, cert := range certs {
pool.AddCert(cert)
}
return pool, nil
}
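// exampleSelfSignedServerCert is an illustrative sketch (not part of the
// upstream file); it assumes GenerateECP256PrivateKey from this package and
// shows how a self-signed server certificate and a CA pool could be produced.
func exampleSelfSignedServerCert() error {
	key, err := GenerateECP256PrivateKey()
	if err != nil {
		return err
	}
	// Certificate valid for "localhost" and the loopback address.
	cert, err := GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")})
	if err != nil {
		return err
	}
	fmt.Println("cert CN:", cert.Subject.CommonName) // the key ID of the generated key

	// Pool accepting self-signed certificates issued for the trusted key.
	_, err = GenerateCACertPool(key, []PublicKey{key.PublicKey()})
	return err
}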

View File

@@ -1,9 +0,0 @@
/*
Package libtrust provides an interface for managing authentication and
authorization using public key cryptography. Authentication is handled
using the identity attached to the public key and verified through TLS
x509 certificates, a key challenge, or signature. Authorization and
access control is managed through a trust graph distributed between
both remote trust servers and locally cached and managed data.
*/
package libtrust

View File

@@ -1,428 +0,0 @@
package libtrust
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
)
/*
* EC DSA PUBLIC KEY
*/
// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
// signature algorithms.
type ecPublicKey struct {
*ecdsa.PublicKey
curveName string
signatureAlgorithm *signatureAlgorithm
extended map[string]interface{}
}
func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
curve := cryptoPublicKey.Curve
switch {
case curve == elliptic.P256():
return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
case curve == elliptic.P384():
return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
case curve == elliptic.P521():
return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
default:
return nil, errors.New("unsupported elliptic curve")
}
}
// KeyType returns the key type for elliptic curve keys, i.e., "EC".
func (k *ecPublicKey) KeyType() string {
return "EC"
}
// CurveName returns the elliptic curve identifier.
// Possible values are "P-256", "P-384", and "P-521".
func (k *ecPublicKey) CurveName() string {
return k.curveName
}
// KeyID returns a distinct identifier which is unique to this Public Key.
func (k *ecPublicKey) KeyID() string {
return keyIDFromCryptoKey(k)
}
func (k *ecPublicKey) String() string {
return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
}
// Verify verifies the signature of the data in the io.Reader using this
// PublicKey. The alg parameter should identify the digital signature
// algorithm which was used to produce the signature and should be supported
// by this public key. Returns a nil error if the signature is valid.
func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
// For EC keys there is only one supported signature algorithm depending
// on the curve parameters.
if k.signatureAlgorithm.HeaderParam() != alg {
return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
}
// signature is the concatenation of (r, s), base64Url encoded.
sigLength := len(signature)
expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
if sigLength != expectedOctetLength {
return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
}
rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
r := new(big.Int).SetBytes(rBytes)
s := new(big.Int).SetBytes(sBytes)
hasher := k.signatureAlgorithm.HashID().New()
_, err := io.Copy(hasher, data)
if err != nil {
return fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
if !ecdsa.Verify(k.PublicKey, hash, r, s) {
return errors.New("invalid signature")
}
return nil
}
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
return k.PublicKey
}
func (k *ecPublicKey) toMap() map[string]interface{} {
jwk := make(map[string]interface{})
for k, v := range k.extended {
jwk[k] = v
}
jwk["kty"] = k.KeyType()
jwk["kid"] = k.KeyID()
jwk["crv"] = k.CurveName()
xBytes := k.X.Bytes()
yBytes := k.Y.Bytes()
octetLength := (k.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output so that x, y are each
// *octetLength* bytes long.
xBuf := make([]byte, octetLength-len(xBytes), octetLength)
yBuf := make([]byte, octetLength-len(yBytes), octetLength)
xBuf = append(xBuf, xBytes...)
yBuf = append(yBuf, yBytes...)
jwk["x"] = joseBase64UrlEncode(xBuf)
jwk["y"] = joseBase64UrlEncode(yBuf)
return jwk
}
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// elliptic curve keys.
func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
}
k.extended["kid"] = k.KeyID() // For display purposes.
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
k.extended[field] = value
}
func (k *ecPublicKey) GetExtendedField(field string) interface{} {
v, ok := k.extended[field]
if !ok {
return nil
}
return v
}
func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
// JWK key type (kty) has already been determined to be "EC".
// Need to extract 'crv', 'x', 'y', and 'kid' and check for
// consistency.
// Get the curve identifier value.
crv, err := stringFromMap(jwk, "crv")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
}
var (
curve elliptic.Curve
sigAlg *signatureAlgorithm
)
switch {
case crv == "P-256":
curve = elliptic.P256()
sigAlg = es256
case crv == "P-384":
curve = elliptic.P384()
sigAlg = es384
case crv == "P-521":
curve = elliptic.P521()
sigAlg = es512
default:
return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv)
}
// Get the X and Y coordinates for the public key point.
xB64Url, err := stringFromMap(jwk, "x")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
}
x, err := parseECCoordinate(xB64Url, curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
}
yB64Url, err := stringFromMap(jwk, "y")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
}
y, err := parseECCoordinate(yB64Url, curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
}
key := &ecPublicKey{
PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
curveName: crv, signatureAlgorithm: sigAlg,
}
// Key ID is optional too, but if it exists, it should match the key.
_, ok := jwk["kid"]
if ok {
kid, err := stringFromMap(jwk, "kid")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
}
if kid != key.KeyID() {
return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
}
}
key.extended = jwk
return key, nil
}
/*
* EC DSA PRIVATE KEY
*/
// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
// algorithms.
type ecPrivateKey struct {
ecPublicKey
*ecdsa.PrivateKey
}
func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
if err != nil {
return nil, err
}
return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
}
// PublicKey returns the Public Key data associated with this Private Key.
func (k *ecPrivateKey) PublicKey() PublicKey {
return &k.ecPublicKey
}
func (k *ecPrivateKey) String() string {
return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
}
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the elliptic curve private key. If the specified hashing algorithm is
// supported by this key, that hash function is used to generate the signature;
// otherwise the default hashing algorithm for this key is used. Returns
// the signature and the name of the JWK signature algorithm used, e.g.,
// "ES256", "ES384", "ES512".
func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
// Generate a signature of the data using the internal alg.
// The given hashId is only a suggestion, and since EC keys only support
// one signature/hash algorithm given the curve name, we disregard it for
// the elliptic curve JWK signature implementation.
hasher := k.signatureAlgorithm.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return nil, "", fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
if err != nil {
return nil, "", fmt.Errorf("error producing signature: %s", err)
}
rBytes, sBytes := r.Bytes(), s.Bytes()
octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output
rBuf := make([]byte, octetLength-len(rBytes), octetLength)
sBuf := make([]byte, octetLength-len(sBytes), octetLength)
rBuf = append(rBuf, rBytes...)
sBuf = append(sBuf, sBytes...)
signature = append(rBuf, sBuf...)
alg = k.signatureAlgorithm.HeaderParam()
return
}
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The type
// is *ecdsa.PrivateKey
func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
return k.PrivateKey
}
func (k *ecPrivateKey) toMap() map[string]interface{} {
jwk := k.ecPublicKey.toMap()
dBytes := k.D.Bytes()
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
// octets (where n is the order of the curve). This is because the private
// key d must be in the interval [1, n-1] so the bitlength of d should be
// no larger than the bitlength of n-1. The easiest way to find the octet
// length is to take bitlength(n-1), add 7 to force a carry, and shift this
// bit sequence right by 3, which is essentially dividing by 8 and adding
// 1 if there is any remainder. Thus, the private key value d should be
// output to (bitlength(n-1)+7)>>3 octets.
n := k.ecPublicKey.Params().N
octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
// Create a buffer with the necessary zero-padding.
dBuf := make([]byte, octetLength-len(dBytes), octetLength)
dBuf = append(dBuf, dBytes...)
jwk["d"] = joseBase64UrlEncode(dBuf)
return jwk
}
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// elliptic curve keys.
func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Private Key to DER-encoded PKIX format.
func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
}
k.extended["keyID"] = k.KeyID() // For display purposes.
return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
}
func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
dB64Url, err := stringFromMap(jwk, "d")
if err != nil {
return nil, fmt.Errorf("JWK EC Private Key: %s", err)
}
// JWK key type (kty) has already been determined to be "EC".
// Need to extract the public key information, then extract the private
// key value 'd'.
publicKey, err := ecPublicKeyFromMap(jwk)
if err != nil {
return nil, err
}
d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
}
key := &ecPrivateKey{
ecPublicKey: *publicKey,
PrivateKey: &ecdsa.PrivateKey{
PublicKey: *publicKey.PublicKey,
D: d,
},
}
return key, nil
}
/*
* Key Generation Functions.
*/
func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
k = new(ecPrivateKey)
k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
if err != nil {
return nil, err
}
k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
k.extended = make(map[string]interface{})
return
}
// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
func GenerateECP256PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P256())
if err != nil {
return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
}
k.curveName = "P-256"
k.signatureAlgorithm = es256
return k, nil
}
// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
func GenerateECP384PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P384())
if err != nil {
return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
}
k.curveName = "P-384"
k.signatureAlgorithm = es384
return k, nil
}
// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
func GenerateECP521PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P521())
if err != nil {
return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
}
k.curveName = "P-521"
k.signatureAlgorithm = es512
return k, nil
}

View File

@@ -1,50 +0,0 @@
package libtrust
import (
"path/filepath"
)
// FilterByHosts filters the list of PublicKeys to only those which contain a
// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
// then keys which do not specify any hosts are also returned.
func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
filtered := make([]PublicKey, 0, len(keys))
for _, pubKey := range keys {
var hosts []string
switch v := pubKey.GetExtendedField("hosts").(type) {
case []string:
hosts = v
case []interface{}:
for _, value := range v {
h, ok := value.(string)
if !ok {
continue
}
hosts = append(hosts, h)
}
}
if len(hosts) == 0 {
if includeEmpty {
filtered = append(filtered, pubKey)
}
continue
}
// Check if any hosts match pattern
for _, hostPattern := range hosts {
match, err := filepath.Match(hostPattern, host)
if err != nil {
return nil, err
}
if match {
filtered = append(filtered, pubKey)
break // a key should be added at most once, even if several patterns match
}
}
}
return filtered, nil
}
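// exampleFilterByHosts is an illustrative sketch (not part of the upstream
// file); it assumes GenerateECP256PrivateKey from this package and shows how
// the "hosts" extended field drives filtering.
func exampleFilterByHosts() ([]PublicKey, error) {
	restricted, err := GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	unrestricted, err := GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	pub1 := restricted.PublicKey()
	pub2 := unrestricted.PublicKey()
	// Only pub1 is limited to hosts matching *.example.com.
	pub1.AddExtendedField("hosts", []string{"*.example.com"})

	// With includeEmpty=false only pub1 is returned for a matching host;
	// passing true would also return pub2, which lists no hosts.
	return FilterByHosts([]PublicKey{pub1, pub2}, "registry.example.com", false)
}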

View File

@@ -1,56 +0,0 @@
package libtrust
import (
"crypto"
_ "crypto/sha256" // Registrer SHA224 and SHA256
_ "crypto/sha512" // Registrer SHA384 and SHA512
"fmt"
)
type signatureAlgorithm struct {
algHeaderParam string
hashID crypto.Hash
}
func (h *signatureAlgorithm) HeaderParam() string {
return h.algHeaderParam
}
func (h *signatureAlgorithm) HashID() crypto.Hash {
return h.hashID
}
var (
rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
)
func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
switch {
case alg == "RS256":
return rs256, nil
case alg == "RS384":
return rs384, nil
case alg == "RS512":
return rs512, nil
default:
return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
}
}
func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
switch {
case hashID == crypto.SHA512:
return rs512
case hashID == crypto.SHA384:
return rs384
case hashID == crypto.SHA256:
fallthrough
default:
return rs256
}
}

View File

@@ -1,657 +0,0 @@
package libtrust
import (
"bytes"
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"sort"
"time"
"unicode"
)
var (
// ErrInvalidSignContent is used when the content to be signed is invalid.
ErrInvalidSignContent = errors.New("invalid sign content")
// ErrInvalidJSONContent is used when invalid json is encountered.
ErrInvalidJSONContent = errors.New("invalid json content")
// ErrMissingSignatureKey is used when the specified signature key
// does not exist in the JSON content.
ErrMissingSignatureKey = errors.New("missing signature key")
)
type jsHeader struct {
JWK PublicKey `json:"jwk,omitempty"`
Algorithm string `json:"alg"`
Chain []string `json:"x5c,omitempty"`
}
type jsSignature struct {
Header jsHeader `json:"header"`
Signature string `json:"signature"`
Protected string `json:"protected,omitempty"`
}
type jsSignaturesSorted []jsSignature
func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) }
func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
si, sj := jsbkid[i].Signature, jsbkid[j].Signature
if ki == kj {
return si < sj
}
return ki < kj
}
type signKey struct {
PrivateKey
Chain []*x509.Certificate
}
// JSONSignature represents a signature of a json object.
type JSONSignature struct {
payload string
signatures []jsSignature
indent string
formatLength int
formatTail []byte
}
func newJSONSignature() *JSONSignature {
return &JSONSignature{
signatures: make([]jsSignature, 0, 1),
}
}
// Payload returns the encoded payload of the signature. This
// payload should not be signed directly
func (js *JSONSignature) Payload() ([]byte, error) {
return joseBase64UrlDecode(js.payload)
}
func (js *JSONSignature) protectedHeader() (string, error) {
protected := map[string]interface{}{
"formatLength": js.formatLength,
"formatTail": joseBase64UrlEncode(js.formatTail),
"time": time.Now().UTC().Format(time.RFC3339),
}
protectedBytes, err := json.Marshal(protected)
if err != nil {
return "", err
}
return joseBase64UrlEncode(protectedBytes), nil
}
func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
copy(buf, protectedHeader)
buf[len(protectedHeader)] = '.'
copy(buf[len(protectedHeader)+1:], js.payload)
return buf, nil
}
// Sign adds a signature using the given private key.
func (js *JSONSignature) Sign(key PrivateKey) error {
protected, err := js.protectedHeader()
if err != nil {
return err
}
signBytes, err := js.signBytes(protected)
if err != nil {
return err
}
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
if err != nil {
return err
}
js.signatures = append(js.signatures, jsSignature{
Header: jsHeader{
JWK: key.PublicKey(),
Algorithm: algorithm,
},
Signature: joseBase64UrlEncode(sigBytes),
Protected: protected,
})
return nil
}
// SignWithChain adds a signature using the given private key
// and setting the x509 chain. The public key of the first element
// in the chain must be the public key corresponding with the sign key.
func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
// Ensure key.Chain[0] is public key for key
//key.Chain.PublicKey
//key.PublicKey().CryptoPublicKey()
// Verify chain
protected, err := js.protectedHeader()
if err != nil {
return err
}
signBytes, err := js.signBytes(protected)
if err != nil {
return err
}
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
if err != nil {
return err
}
header := jsHeader{
Chain: make([]string, len(chain)),
Algorithm: algorithm,
}
for i, cert := range chain {
header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
}
js.signatures = append(js.signatures, jsSignature{
Header: header,
Signature: joseBase64UrlEncode(sigBytes),
Protected: protected,
})
return nil
}
// Verify verifies all the signatures and returns the list of
// public keys used to sign. Any x509 chains are not checked.
func (js *JSONSignature) Verify() ([]PublicKey, error) {
keys := make([]PublicKey, len(js.signatures))
for i, signature := range js.signatures {
signBytes, err := js.signBytes(signature.Protected)
if err != nil {
return nil, err
}
var publicKey PublicKey
if len(signature.Header.Chain) > 0 {
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
if err != nil {
return nil, err
}
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
if err != nil {
return nil, err
}
} else if signature.Header.JWK != nil {
publicKey = signature.Header.JWK
} else {
return nil, errors.New("missing public key")
}
sigBytes, err := joseBase64UrlDecode(signature.Signature)
if err != nil {
return nil, err
}
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
if err != nil {
return nil, err
}
keys[i] = publicKey
}
return keys, nil
}
// VerifyChains verifies all the signatures and the chains associated
// with each signature and returns the list of verified chains.
// Signatures without an x509 chain are not checked.
func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
chains := make([][]*x509.Certificate, 0, len(js.signatures))
for _, signature := range js.signatures {
signBytes, err := js.signBytes(signature.Protected)
if err != nil {
return nil, err
}
var publicKey PublicKey
if len(signature.Header.Chain) > 0 {
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
if err != nil {
return nil, err
}
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
if err != nil {
return nil, err
}
intermediates := x509.NewCertPool()
if len(signature.Header.Chain) > 1 {
intermediateChain := signature.Header.Chain[1:]
for i := range intermediateChain {
certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
if err != nil {
return nil, err
}
intermediate, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
intermediates.AddCert(intermediate)
}
}
verifyOptions := x509.VerifyOptions{
Intermediates: intermediates,
Roots: ca,
}
verifiedChains, err := cert.Verify(verifyOptions)
if err != nil {
return nil, err
}
chains = append(chains, verifiedChains...)
sigBytes, err := joseBase64UrlDecode(signature.Signature)
if err != nil {
return nil, err
}
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
if err != nil {
return nil, err
}
}
}
return chains, nil
}
// JWS returns JSON serialized JWS according to
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
func (js *JSONSignature) JWS() ([]byte, error) {
if len(js.signatures) == 0 {
return nil, errors.New("missing signature")
}
sort.Sort(jsSignaturesSorted(js.signatures))
jsonMap := map[string]interface{}{
"payload": js.payload,
"signatures": js.signatures,
}
return json.MarshalIndent(jsonMap, "", " ")
}
func notSpace(r rune) bool {
return !unicode.IsSpace(r)
}
func detectJSONIndent(jsonContent []byte) (indent string) {
if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
if quoteIndex > 0 {
indent = string(jsonContent[2 : quoteIndex+1])
}
}
return
}
type jsParsedHeader struct {
JWK json.RawMessage `json:"jwk"`
Algorithm string `json:"alg"`
Chain []string `json:"x5c"`
}
type jsParsedSignature struct {
Header jsParsedHeader `json:"header"`
Signature string `json:"signature"`
Protected string `json:"protected"`
}
// ParseJWS parses a JWS serialized JSON object into a JSON Signature.
func ParseJWS(content []byte) (*JSONSignature, error) {
type jsParsed struct {
Payload string `json:"payload"`
Signatures []jsParsedSignature `json:"signatures"`
}
parsed := &jsParsed{}
err := json.Unmarshal(content, parsed)
if err != nil {
return nil, err
}
if len(parsed.Signatures) == 0 {
return nil, errors.New("missing signatures")
}
payload, err := joseBase64UrlDecode(parsed.Payload)
if err != nil {
return nil, err
}
js, err := NewJSONSignature(payload)
if err != nil {
return nil, err
}
js.signatures = make([]jsSignature, len(parsed.Signatures))
for i, signature := range parsed.Signatures {
header := jsHeader{
Algorithm: signature.Header.Algorithm,
}
if signature.Header.Chain != nil {
header.Chain = signature.Header.Chain
}
if signature.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
if err != nil {
return nil, err
}
header.JWK = publicKey
}
js.signatures[i] = jsSignature{
Header: header,
Signature: signature.Signature,
Protected: signature.Protected,
}
}
return js, nil
}
// NewJSONSignature returns a new unsigned JWS from a json byte array.
// JSONSignature will need to be signed before serializing or storing.
// Optionally, one or more signatures can be provided as byte buffers,
// containing serialized JWS signatures, to assemble a fully signed JWS
// package. It is the caller's responsibility to ensure uniqueness of the
// provided signatures.
func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
var dataMap map[string]interface{}
err := json.Unmarshal(content, &dataMap)
if err != nil {
return nil, err
}
js := newJSONSignature()
js.indent = detectJSONIndent(content)
js.payload = joseBase64UrlEncode(content)
// Find trailing } and whitespace, put in protected header
closeIndex := bytes.LastIndexFunc(content, notSpace)
if content[closeIndex] != '}' {
return nil, ErrInvalidJSONContent
}
lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
if content[lastRuneIndex] == ',' {
return nil, ErrInvalidJSONContent
}
js.formatLength = lastRuneIndex + 1
js.formatTail = content[js.formatLength:]
if len(signatures) > 0 {
for _, signature := range signatures {
var parsedJSig jsParsedSignature
if err := json.Unmarshal(signature, &parsedJSig); err != nil {
return nil, err
}
// TODO(stevvooe): A lot of the code below is repeated in
// ParseJWS. It will require more refactoring to fix that.
jsig := jsSignature{
Header: jsHeader{
Algorithm: parsedJSig.Header.Algorithm,
},
Signature: parsedJSig.Signature,
Protected: parsedJSig.Protected,
}
if parsedJSig.Header.Chain != nil {
jsig.Header.Chain = parsedJSig.Header.Chain
}
if parsedJSig.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
if err != nil {
return nil, err
}
jsig.Header.JWK = publicKey
}
js.signatures = append(js.signatures, jsig)
}
}
return js, nil
}
// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
// struct. JWS will need to be signed before serializing or storing.
func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
switch content.(type) {
case map[string]interface{}:
case struct{}:
default:
return nil, errors.New("invalid data type")
}
js := newJSONSignature()
js.indent = " "
payload, err := json.MarshalIndent(content, "", js.indent)
if err != nil {
return nil, err
}
js.payload = joseBase64UrlEncode(payload)
// Remove '\n}' from formatted section, put in protected header
js.formatLength = len(payload) - 2
js.formatTail = payload[js.formatLength:]
return js, nil
}
func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
value, ok := m[key]
if !ok {
return 0, false
}
switch v := value.(type) {
case int:
return v, true
case float64:
return int(v), true
default:
return 0, false
}
}
func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
value, ok := m[key]
if !ok {
return "", false
}
v, ok = value.(string)
return
}
// ParsePrettySignature parses a formatted signature into a
// JSON signature. If the signatures are missing the format information,
// an error is returned. The formatted signature must have been created by
// the same formatting method as PrettySignature.
func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
var contentMap map[string]json.RawMessage
err := json.Unmarshal(content, &contentMap)
if err != nil {
return nil, fmt.Errorf("error unmarshalling content: %s", err)
}
sigMessage, ok := contentMap[signatureKey]
if !ok {
return nil, ErrMissingSignatureKey
}
var signatureBlocks []jsParsedSignature
err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
if err != nil {
return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
}
js := newJSONSignature()
js.signatures = make([]jsSignature, len(signatureBlocks))
for i, signatureBlock := range signatureBlocks {
protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
if err != nil {
return nil, fmt.Errorf("base64 decode error: %s", err)
}
var protectedHeader map[string]interface{}
err = json.Unmarshal(protectedBytes, &protectedHeader)
if err != nil {
return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
}
formatLength, ok := readIntFromMap("formatLength", protectedHeader)
if !ok {
return nil, errors.New("missing formatted length")
}
encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
if !ok {
return nil, errors.New("missing formatted tail")
}
formatTail, err := joseBase64UrlDecode(encodedTail)
if err != nil {
return nil, fmt.Errorf("base64 decode error on tail: %s", err)
}
if js.formatLength == 0 {
js.formatLength = formatLength
} else if js.formatLength != formatLength {
return nil, errors.New("conflicting format length")
}
if len(js.formatTail) == 0 {
js.formatTail = formatTail
} else if bytes.Compare(js.formatTail, formatTail) != 0 {
return nil, errors.New("conflicting format tail")
}
header := jsHeader{
Algorithm: signatureBlock.Header.Algorithm,
Chain: signatureBlock.Header.Chain,
}
if signatureBlock.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
if err != nil {
return nil, fmt.Errorf("error unmarshalling public key: %s", err)
}
header.JWK = publicKey
}
js.signatures[i] = jsSignature{
Header: header,
Signature: signatureBlock.Signature,
Protected: signatureBlock.Protected,
}
}
if js.formatLength > len(content) {
return nil, errors.New("invalid format length")
}
formatted := make([]byte, js.formatLength+len(js.formatTail))
copy(formatted, content[:js.formatLength])
copy(formatted[js.formatLength:], js.formatTail)
js.indent = detectJSONIndent(formatted)
js.payload = joseBase64UrlEncode(formatted)
return js, nil
}
// PrettySignature formats a JSON signature into an easy-to-read,
// single JSON-serialized object.
func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
if len(js.signatures) == 0 {
return nil, errors.New("no signatures")
}
payload, err := joseBase64UrlDecode(js.payload)
if err != nil {
return nil, err
}
payload = payload[:js.formatLength]
sort.Sort(jsSignaturesSorted(js.signatures))
var marshalled []byte
var marshallErr error
if js.indent != "" {
marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
} else {
marshalled, marshallErr = json.Marshal(js.signatures)
}
if marshallErr != nil {
return nil, marshallErr
}
buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
buf.Write(payload)
buf.WriteByte(',')
if js.indent != "" {
buf.WriteByte('\n')
buf.WriteString(js.indent)
buf.WriteByte('"')
buf.WriteString(signatureKey)
buf.WriteString("\": ")
buf.Write(marshalled)
buf.WriteByte('\n')
} else {
buf.WriteByte('"')
buf.WriteString(signatureKey)
buf.WriteString("\":")
buf.Write(marshalled)
}
buf.WriteByte('}')
return buf.Bytes(), nil
}
// Signatures provides the signatures on this JWS as opaque blobs, sorted by
// keyID. These blobs can be stored and reassembled with payloads. Internally,
// they are simply marshaled json web signatures but implementations should
// not rely on this.
func (js *JSONSignature) Signatures() ([][]byte, error) {
sort.Sort(jsSignaturesSorted(js.signatures))
var sb [][]byte
for _, jsig := range js.signatures {
p, err := json.Marshal(jsig)
if err != nil {
return nil, err
}
sb = append(sb, p)
}
return sb, nil
}
// Merge combines the signatures from one or more other signatures into the
// method receiver. If the payloads differ for any argument, an error will be
// returned and the receiver will not be modified.
func (js *JSONSignature) Merge(others ...*JSONSignature) error {
merged := js.signatures
for _, other := range others {
if js.payload != other.payload {
return fmt.Errorf("payloads differ from merge target")
}
merged = append(merged, other.signatures...)
}
js.signatures = merged
return nil
}
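// exampleSignedManifest is an illustrative sketch (not part of the upstream
// file); it assumes GenerateECP256PrivateKey from this package and shows the
// sign-then-pretty-print flow for a formatted JSON document.
func exampleSignedManifest() ([]byte, error) {
	key, err := GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	// A formatted JSON payload; indentation is detected and preserved.
	content := []byte("{\n   \"name\": \"example\",\n   \"tag\": \"latest\"\n}")
	js, err := NewJSONSignature(content)
	if err != nil {
		return nil, err
	}
	if err := js.Sign(key); err != nil {
		return nil, err
	}
	// Embed the signatures under a "signatures" key next to the payload.
	return js.PrettySignature("signatures")
}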

View File

@@ -1,253 +0,0 @@
package libtrust
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
)
// PublicKey is a generic interface for a Public Key.
type PublicKey interface {
// KeyType returns the key type for this key. For elliptic curve keys,
// this value should be "EC". For RSA keys, this value should be "RSA".
KeyType() string
// KeyID returns a distinct identifier which is unique to this Public Key.
// The format generated by this library is a base32 encoding of a 240 bit
// hash of the public key data divided into 12 groups like so:
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
KeyID() string
// Verify verifies the signature of the data in the io.Reader using this
// Public Key. The alg parameter should identify the digital signature
// algorithm which was used to produce the signature and should be
// supported by this public key. Returns a nil error if the signature
// is valid.
Verify(data io.Reader, alg string, signature []byte) error
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
CryptoPublicKey() crypto.PublicKey
// These public keys can be serialized to the standard JSON encoding for
// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
// Algorithms.
MarshalJSON() ([]byte, error)
// These keys can also be serialized to the standard PEM encoding.
PEMBlock() (*pem.Block, error)
// The string representation of a key is its key type and ID.
String() string
AddExtendedField(string, interface{})
GetExtendedField(string) interface{}
}
// PrivateKey is a generic interface for a Private Key.
type PrivateKey interface {
// A PrivateKey contains all fields and methods of a PublicKey of the
// same type. The MarshalJSON method also outputs the private key as a
// JSON Web Key, and the PEMBlock method outputs the private key as a
// PEM block.
PublicKey
// PublicKey returns the PublicKey associated with this PrivateKey.
PublicKey() PublicKey
// Sign signs the data read from the io.Reader using a signature algorithm
// supported by the private key. If the specified hashing algorithm is
// supported by this key, that hash function is used to generate the
// signature; otherwise the default hashing algorithm for this key is
// used. Returns the signature and identifier of the algorithm used.
Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The
// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
CryptoPrivateKey() crypto.PrivateKey
}
// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
switch cryptoPublicKey := cryptoPublicKey.(type) {
case *ecdsa.PublicKey:
return fromECPublicKey(cryptoPublicKey)
case *rsa.PublicKey:
return fromRSAPublicKey(cryptoPublicKey), nil
default:
return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
}
}
// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
switch cryptoPrivateKey := cryptoPrivateKey.(type) {
case *ecdsa.PrivateKey:
return fromECPrivateKey(cryptoPrivateKey)
case *rsa.PrivateKey:
return fromRSAPrivateKey(cryptoPrivateKey), nil
default:
return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
}
}
// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
// PublicKey or an error if there is a problem with the encoding.
func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
pemBlock, _ := pem.Decode(data)
if pemBlock == nil {
return nil, errors.New("unable to find PEM encoded data")
} else if pemBlock.Type != "PUBLIC KEY" {
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
}
return pubKeyFromPEMBlock(pemBlock)
}
// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
// PEM blocks appended one after the other and returns a slice of PublicKey
// objects that it finds.
func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
pubKeys := []PublicKey{}
for {
var pemBlock *pem.Block
pemBlock, data = pem.Decode(data)
if pemBlock == nil {
break
} else if pemBlock.Type != "PUBLIC KEY" {
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
}
pubKey, err := pubKeyFromPEMBlock(pemBlock)
if err != nil {
return nil, err
}
pubKeys = append(pubKeys, pubKey)
}
return pubKeys, nil
}
// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
// PrivateKey or an error if there is a problem with the encoding.
func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
pemBlock, _ := pem.Decode(data)
if pemBlock == nil {
return nil, errors.New("unable to find PEM encoded data")
}
var key PrivateKey
switch {
case pemBlock.Type == "RSA PRIVATE KEY":
rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
}
key = fromRSAPrivateKey(rsaPrivateKey)
case pemBlock.Type == "EC PRIVATE KEY":
ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
}
key, err = fromECPrivateKey(ecPrivateKey)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
}
addPEMHeadersToKey(pemBlock, key.PublicKey())
return key, nil
}
// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
// Public Key to be used with libtrust.
func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
jwk := make(map[string]interface{})
err := json.Unmarshal(data, &jwk)
if err != nil {
return nil, fmt.Errorf(
"decoding JWK Public Key JSON data: %s\n", err,
)
}
// Get the Key Type value.
kty, err := stringFromMap(jwk, "kty")
if err != nil {
return nil, fmt.Errorf("JWK Public Key type: %s", err)
}
switch {
case kty == "EC":
// Call out to unmarshal EC public key.
return ecPublicKeyFromMap(jwk)
case kty == "RSA":
// Call out to unmarshal RSA public key.
return rsaPublicKeyFromMap(jwk)
default:
return nil, fmt.Errorf(
"JWK Public Key type not supported: %q\n", kty,
)
}
}
// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
// and returns a slice of Public Key objects.
func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
rawKeys, err := loadJSONKeySetRaw(data)
if err != nil {
return nil, err
}
pubKeys := make([]PublicKey, 0, len(rawKeys))
for _, rawKey := range rawKeys {
pubKey, err := UnmarshalPublicKeyJWK(rawKey)
if err != nil {
return nil, err
}
pubKeys = append(pubKeys, pubKey)
}
return pubKeys, nil
}
// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
// Private Key to be used with libtrust.
func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
jwk := make(map[string]interface{})
err := json.Unmarshal(data, &jwk)
if err != nil {
return nil, fmt.Errorf(
"decoding JWK Private Key JSON data: %s\n", err,
)
}
// Get the Key Type value.
kty, err := stringFromMap(jwk, "kty")
if err != nil {
return nil, fmt.Errorf("JWK Private Key type: %s", err)
}
switch {
case kty == "EC":
// Call out to unmarshal EC private key.
return ecPrivateKeyFromMap(jwk)
case kty == "RSA":
// Call out to unmarshal RSA private key.
return rsaPrivateKeyFromMap(jwk)
default:
return nil, fmt.Errorf(
"JWK Private Key type not supported: %q\n", kty,
)
}
}
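Taken together, the two interfaces above cover the whole signing round trip. The following is a minimal sketch of how they are intended to be used, assuming the vendored import path github.com/docker/libtrust and the package's GenerateECP256PrivateKey helper (the same generator LoadOrCreateTrustKey calls further down); the payload string is illustrative.

package main

import (
	"bytes"
	"crypto"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Generate an EC P-256 key; any PrivateKey implementation behaves the same.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	payload := []byte("hello, libtrust")

	// Sign the payload. The returned alg names the JWA algorithm actually used.
	sig, alg, err := key.Sign(bytes.NewReader(payload), crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}

	// Verify with the public half; a nil error means the signature is valid.
	if err := key.PublicKey().Verify(bytes.NewReader(payload), alg, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println("verified signature from key", key.KeyID())
}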

View File

@@ -1,255 +0,0 @@
package libtrust
import (
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
)
var (
// ErrKeyFileDoesNotExist indicates that the private key file does not exist.
ErrKeyFileDoesNotExist = errors.New("key file does not exist")
)
func readKeyFileBytes(filename string) ([]byte, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
if os.IsNotExist(err) {
err = ErrKeyFileDoesNotExist
} else {
err = fmt.Errorf("unable to read key file %s: %s", filename, err)
}
return nil, err
}
return data, nil
}
/*
Loading and Saving of Public and Private Keys in either PEM or JWK format.
*/
// LoadKeyFile opens the given filename and attempts to read a Private Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadKeyFile(filename string) (PrivateKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil {
return nil, err
}
var key PrivateKey
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
key, err = UnmarshalPrivateKeyJWK(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
}
} else {
key, err = UnmarshalPrivateKeyPEM(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
}
}
return key, nil
}
// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadPublicKeyFile(filename string) (PublicKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil {
return nil, err
}
var key PublicKey
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
key, err = UnmarshalPublicKeyJWK(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
}
} else {
key, err = UnmarshalPublicKeyPEM(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
}
}
return key, nil
}
// SaveKey saves the given key to a file using the provided filename.
// This process will overwrite any existing file at the provided location.
func SaveKey(filename string, key PrivateKey) error {
var encodedKey []byte
var err error
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
// Encode in JSON Web Key format.
encodedKey, err = json.MarshalIndent(key, "", " ")
if err != nil {
return fmt.Errorf("unable to encode private key JWK: %s", err)
}
} else {
// Encode in PEM format.
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encode private key PEM: %s", err)
}
encodedKey = pem.EncodeToMemory(pemBlock)
}
err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
if err != nil {
return fmt.Errorf("unable to write private key file %s: %s", filename, err)
}
return nil
}
// SavePublicKey saves the given public key to the file.
func SavePublicKey(filename string, key PublicKey) error {
var encodedKey []byte
var err error
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
// Encode in JSON Web Key format.
encodedKey, err = json.MarshalIndent(key, "", " ")
if err != nil {
return fmt.Errorf("unable to encode public key JWK: %s", err)
}
} else {
// Encode in PEM format.
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encode public key PEM: %s", err)
}
encodedKey = pem.EncodeToMemory(pemBlock)
}
err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to write public key file %s: %s", filename, err)
}
return nil
}
// Public Key Set files
type jwkSet struct {
Keys []json.RawMessage `json:"keys"`
}
// LoadKeySetFile loads a key set
func LoadKeySetFile(filename string) ([]PublicKey, error) {
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
return loadJSONKeySetFile(filename)
}
// Must be a PEM format file
return loadPEMKeySetFile(filename)
}
func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
if len(data) == 0 {
// This is okay, just return an empty slice.
return []json.RawMessage{}, nil
}
keySet := jwkSet{}
err := json.Unmarshal(data, &keySet)
if err != nil {
return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
}
return keySet.Keys, nil
}
func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return nil, err
}
return UnmarshalPublicKeyJWKSet(contents)
}
func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
data, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return nil, err
}
return UnmarshalPublicKeyPEMBundle(data)
}
// AddKeySetFile adds a key to a key set
func AddKeySetFile(filename string, key PublicKey) error {
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
return addKeySetJSONFile(filename, key)
}
// Must be a PEM format file
return addKeySetPEMFile(filename, key)
}
func addKeySetJSONFile(filename string, key PublicKey) error {
encodedKey, err := json.Marshal(key)
if err != nil {
return fmt.Errorf("unable to encode trusted client key: %s", err)
}
contents, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return err
}
rawEntries, err := loadJSONKeySetRaw(contents)
if err != nil {
return err
}
rawEntries = append(rawEntries, json.RawMessage(encodedKey))
entriesWrapper := jwkSet{Keys: rawEntries}
encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
if err != nil {
return fmt.Errorf("unable to encode trusted client keys: %s", err)
}
err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
}
return nil
}
func addKeySetPEMFile(filename string, key PublicKey) error {
// Encode to PEM, open file for appending, write PEM.
file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
}
defer file.Close()
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encoded trusted key: %s", err)
}
_, err = file.Write(pem.EncodeToMemory(pemBlock))
if err != nil {
return fmt.Errorf("unable to write trusted keys file: %s", err)
}
return nil
}
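A rough usage sketch of the helpers above, with placeholder file names: the extension picks the encoding (.json or .jwk for JWK, anything else for PEM), and AddKeySetFile appends the public half to a key-set bundle.

package main

import (
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Generate a key and persist it; the .json extension selects JWK encoding.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	if err := libtrust.SaveKey("trust-key.json", key); err != nil {
		log.Fatal(err)
	}

	// Reload it and append its public half to a PEM key-set bundle.
	reloaded, err := libtrust.LoadKeyFile("trust-key.json")
	if err != nil {
		log.Fatal(err)
	}
	if err := libtrust.AddKeySetFile("authorized-keys.pem", reloaded.PublicKey()); err != nil {
		log.Fatal(err)
	}
	log.Println("stored key", reloaded.KeyID())
}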

View File

@@ -1,175 +0,0 @@
package libtrust
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"sync"
)
// ClientKeyManager manages client keys on the filesystem
type ClientKeyManager struct {
key PrivateKey
clientFile string
clientDir string
clientLock sync.RWMutex
clients []PublicKey
configLock sync.Mutex
configs []*tls.Config
}
// NewClientKeyManager loads a new manager from a set of key files,
// managed by the given private key.
func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
m := &ClientKeyManager{
key: trustKey,
clientFile: clientFile,
clientDir: clientDir,
}
if err := m.loadKeys(); err != nil {
return nil, err
}
// TODO Start watching file and directory
return m, nil
}
func (c *ClientKeyManager) loadKeys() (err error) {
// Load authorized keys file
var clients []PublicKey
if c.clientFile != "" {
clients, err = LoadKeySetFile(c.clientFile)
if err != nil {
return fmt.Errorf("unable to load authorized keys: %s", err)
}
}
// Add clients from authorized keys directory
files, err := ioutil.ReadDir(c.clientDir)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("unable to open authorized keys directory: %s", err)
}
for _, f := range files {
if !f.IsDir() {
publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
if err != nil {
return fmt.Errorf("unable to load authorized key file: %s", err)
}
clients = append(clients, publicKey)
}
}
c.clientLock.Lock()
c.clients = clients
c.clientLock.Unlock()
return nil
}
// RegisterTLSConfig registers a tls configuration with the manager
// so that any changes to the keys may be reflected in
// the tls client CA pool
func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
c.clientLock.RLock()
certPool, err := GenerateCACertPool(c.key, c.clients)
if err != nil {
return fmt.Errorf("CA pool generation error: %s", err)
}
c.clientLock.RUnlock()
tlsConfig.ClientCAs = certPool
c.configLock.Lock()
c.configs = append(c.configs, tlsConfig)
c.configLock.Unlock()
return nil
}
// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
// libtrust identity authentication for the domain specified
func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
return nil, err
}
// Generate cert
ips, domains, err := parseAddr(addr)
if err != nil {
return nil, err
}
// add domain that it expects clients to use
domains = append(domains, domain)
x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
if err != nil {
return nil, fmt.Errorf("certificate generation error: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{{
Certificate: [][]byte{x509Cert.Raw},
PrivateKey: trustKey.CryptoPrivateKey(),
Leaf: x509Cert,
}}
return tlsConfig, nil
}
// NewCertAuthTLSConfig creates a tls.Config for the server to use for
// certificate authentication
func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
// Verify client certificates against a CA?
if caPath != "" {
certPool := x509.NewCertPool()
file, err := ioutil.ReadFile(caPath)
if err != nil {
return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
}
certPool.AppendCertsFromPEM(file)
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
tlsConfig.ClientCAs = certPool
}
return tlsConfig, nil
}
func newTLSConfig() *tls.Config {
return &tls.Config{
NextProtos: []string{"http/1.1"},
// Avoid fallback on insecure SSL protocols
MinVersion: tls.VersionTLS10,
}
}
// parseAddr parses an address into an array of IPs and domains
func parseAddr(addr string) ([]net.IP, []string, error) {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, nil, err
}
var domains []string
var ips []net.IP
ip := net.ParseIP(host)
if ip != nil {
ips = []net.IP{ip}
} else {
domains = []string{host}
}
return ips, domains, nil
}
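As a hedged sketch of how NewCertAuthTLSConfig above might be wired into an HTTPS server (the certificate paths and port are placeholders; passing an empty caPath would skip client-certificate verification):

package main

import (
	"log"
	"net/http"

	"github.com/docker/libtrust"
)

func main() {
	// Build a server tls.Config from PEM files on disk. With a non-empty CA
	// path the server also requires and verifies client certificates.
	tlsConfig, err := libtrust.NewCertAuthTLSConfig("ca.pem", "server-cert.pem", "server-key.pem")
	if err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{
		Addr: ":8443",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok\n"))
		}),
		TLSConfig: tlsConfig,
	}

	// The certificate and key already live in TLSConfig.Certificates, so the
	// file arguments to ListenAndServeTLS can be left empty.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}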

View File

@@ -1,427 +0,0 @@
package libtrust
import (
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
)
/*
* RSA PUBLIC KEY
*/
// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
type rsaPublicKey struct {
*rsa.PublicKey
extended map[string]interface{}
}
func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
}
// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
func (k *rsaPublicKey) KeyType() string {
return "RSA"
}
// KeyID returns a distinct identifier which is unique to this Public Key.
func (k *rsaPublicKey) KeyID() string {
return keyIDFromCryptoKey(k)
}
func (k *rsaPublicKey) String() string {
return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
}
// Verify verifies the signature of the data in the io.Reader using this Public Key.
// The alg parameter should be the name of the JWA digital signature algorithm
// which was used to produce the signature and should be supported by this
// public key. Returns a nil error if the signature is valid.
func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
// Verify the signature of the given data; returns a non-nil error if invalid.
sigAlg, err := rsaSignatureAlgorithmByName(alg)
if err != nil {
return fmt.Errorf("unable to verify Signature: %s", err)
}
hasher := sigAlg.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
if err != nil {
return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
}
return nil
}
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return k.PublicKey
}
func (k *rsaPublicKey) toMap() map[string]interface{} {
jwk := make(map[string]interface{})
for k, v := range k.extended {
jwk[k] = v
}
jwk["kty"] = k.KeyType()
jwk["kid"] = k.KeyID()
jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
return jwk
}
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
}
k.extended["kid"] = k.KeyID() // For display purposes.
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
k.extended[field] = value
}
func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
v, ok := k.extended[field]
if !ok {
return nil
}
return v
}
func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
// JWK key type (kty) has already been determined to be "RSA".
// Need to extract 'n', 'e', and 'kid' and check for
// consistency.
// Get the modulus parameter N.
nB64Url, err := stringFromMap(jwk, "n")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
}
n, err := parseRSAModulusParam(nB64Url)
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
}
// Get the public exponent E.
eB64Url, err := stringFromMap(jwk, "e")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
}
e, err := parseRSAPublicExponentParam(eB64Url)
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
}
key := &rsaPublicKey{
PublicKey: &rsa.PublicKey{N: n, E: e},
}
// Key ID is optional, but if it exists, it should match the key.
_, ok := jwk["kid"]
if ok {
kid, err := stringFromMap(jwk, "kid")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
}
if kid != key.KeyID() {
return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
}
}
if _, ok := jwk["d"]; ok {
return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
}
key.extended = jwk
return key, nil
}
/*
* RSA PRIVATE KEY
*/
// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
type rsaPrivateKey struct {
rsaPublicKey
*rsa.PrivateKey
}
func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
return &rsaPrivateKey{
*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
cryptoPrivateKey,
}
}
// PublicKey returns the Public Key data associated with this Private Key.
func (k *rsaPrivateKey) PublicKey() PublicKey {
return &k.rsaPublicKey
}
func (k *rsaPrivateKey) String() string {
return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
}
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the RSA private key. If the specified hashing algorithm is supported by
// this key, that hash function is used to generate the signature, otherwise
// the default hashing algorithm for this key is used. Returns the signature
// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
// "RS512".
func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
// Generate a signature of the data using the internal alg.
sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
hasher := sigAlg.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return nil, "", fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
if err != nil {
return nil, "", fmt.Errorf("error producing signature: %s", err)
}
alg = sigAlg.HeaderParam()
return
}
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The
// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
return k.PrivateKey
}
func (k *rsaPrivateKey) toMap() map[string]interface{} {
k.Precompute() // Make sure the precomputed values are stored.
jwk := k.rsaPublicKey.toMap()
jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
otherPrimes := k.Primes[2:]
if len(otherPrimes) > 0 {
otherPrimesInfo := make([]interface{}, len(otherPrimes))
for i, r := range otherPrimes {
otherPrimeInfo := make(map[string]string, 3)
otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
crtVal := k.Precomputed.CRTValues[i]
otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
otherPrimesInfo[i] = otherPrimeInfo
}
jwk["oth"] = otherPrimesInfo
}
return jwk
}
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
k.extended["keyID"] = k.KeyID() // For display purposes.
return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
}
func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
// only the private key exponent 'd' is REQUIRED, the others are just for
// signature/decryption optimizations and SHOULD be included when the JWK
// is produced. We MAY choose to accept a JWK which only includes 'd', but
// we choose not to accept it without the extra
// fields. Only the 'oth' field will be optional (for multi-prime keys).
privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
}
firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
}
var oth interface{}
if _, ok := jwk["oth"]; ok {
oth = jwk["oth"]
delete(jwk, "oth")
}
// JWK key type (kty) has already been determined to be "RSA".
// Need to extract the public key information, then extract the private
// key values.
publicKey, err := rsaPublicKeyFromMap(jwk)
if err != nil {
return nil, err
}
privateKey := &rsa.PrivateKey{
PublicKey: *publicKey.PublicKey,
D: privateExponent,
Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
Precomputed: rsa.PrecomputedValues{
Dp: firstFactorCRT,
Dq: secondFactorCRT,
Qinv: crtCoeff,
},
}
if oth != nil {
// Should be an array of more JSON objects.
otherPrimesInfo, ok := oth.([]interface{})
if !ok {
return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
}
numOtherPrimeFactors := len(otherPrimesInfo)
if numOtherPrimeFactors == 0 {
return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
}
otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
for i, val := range otherPrimesInfo {
otherPrimeinfo, ok := val.(map[string]interface{})
if !ok {
return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
}
otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
}
crtValue := crtValues[i]
crtValue.Exp = otherFactorCRT
crtValue.Coeff = otherCrtCoeff
crtValue.R = productOfPrimes
otherPrimeFactors[i] = otherPrimeFactor
productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
}
privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
privateKey.Precomputed.CRTValues = crtValues
}
key := &rsaPrivateKey{
rsaPublicKey: *publicKey,
PrivateKey: privateKey,
}
return key, nil
}
/*
* Key Generation Functions.
*/
func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
k = new(rsaPrivateKey)
k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return nil, err
}
k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
k.extended = make(map[string]interface{})
return
}
// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
func GenerateRSA2048PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(2048)
if err != nil {
return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
}
return k, nil
}
// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
func GenerateRSA3072PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(3072)
if err != nil {
return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
}
return k, nil
}
// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
func GenerateRSA4096PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(4096)
if err != nil {
return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
}
return k, nil
}
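The generators above return a PrivateKey whose MarshalJSON output is exactly the JWK form that rsaPrivateKeyFromMap parses. A quick sketch of generating a 2048-bit key and dumping both halves as JWK (key generation is slow, so real callers would normally persist the result with SaveKey rather than regenerate it):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// The public JWK carries kty, kid, n and e; the private JWK additionally
	// carries d, p, q, dp, dq and qi, the same fields parsed above.
	pubJWK, err := json.MarshalIndent(key.PublicKey(), "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	privJWK, err := json.MarshalIndent(key, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("public JWK:\n%s\n\nprivate JWK:\n%s\n", pubJWK, privJWK)
}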

View File

@@ -1,363 +0,0 @@
package libtrust
import (
"bytes"
"crypto"
"crypto/elliptic"
"crypto/tls"
"crypto/x509"
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/pem"
"errors"
"fmt"
"math/big"
"net/url"
"os"
"path/filepath"
"strings"
"time"
)
// LoadOrCreateTrustKey will load a PrivateKey from the specified path
func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
return nil, err
}
trustKey, err := LoadKeyFile(trustKeyPath)
if err == ErrKeyFileDoesNotExist {
trustKey, err = GenerateECP256PrivateKey()
if err != nil {
return nil, fmt.Errorf("error generating key: %s", err)
}
if err := SaveKey(trustKeyPath, trustKey); err != nil {
return nil, fmt.Errorf("error saving key file: %s", err)
}
dir, file := filepath.Split(trustKeyPath)
if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
return nil, fmt.Errorf("error saving public key file: %s", err)
}
} else if err != nil {
return nil, fmt.Errorf("error loading key file: %s", err)
}
return trustKey, nil
}
// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity
// based authentication from the specified dockerUrl, the rootConfigPath and
// the server name to which it is connecting.
// If trustUnknownHosts is true it will automatically add the host to the
// known-hosts.json in rootConfigPath.
func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
trustKeyPath := filepath.Join(rootConfigPath, "key.json")
knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
u, err := url.Parse(dockerUrl)
if err != nil {
return nil, fmt.Errorf("unable to parse machine url")
}
if u.Scheme == "unix" {
return nil, nil
}
addr := u.Host
proto := "tcp"
trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
if err != nil {
return nil, fmt.Errorf("unable to load trust key: %s", err)
}
knownHosts, err := LoadKeySetFile(knownHostsPath)
if err != nil {
return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
}
allowedHosts, err := FilterByHosts(knownHosts, addr, false)
if err != nil {
return nil, fmt.Errorf("error filtering hosts: %s", err)
}
certPool, err := GenerateCACertPool(trustKey, allowedHosts)
if err != nil {
return nil, fmt.Errorf("Could not create CA pool: %s", err)
}
tlsConfig.ServerName = serverName
tlsConfig.RootCAs = certPool
x509Cert, err := GenerateSelfSignedClientCert(trustKey)
if err != nil {
return nil, fmt.Errorf("certificate generation error: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{{
Certificate: [][]byte{x509Cert.Raw},
PrivateKey: trustKey.CryptoPrivateKey(),
Leaf: x509Cert,
}}
tlsConfig.InsecureSkipVerify = true
testConn, err := tls.Dial(proto, addr, tlsConfig)
if err != nil {
return nil, fmt.Errorf("tls Handshake error: %s", err)
}
opts := x509.VerifyOptions{
Roots: tlsConfig.RootCAs,
CurrentTime: time.Now(),
DNSName: tlsConfig.ServerName,
Intermediates: x509.NewCertPool(),
}
certs := testConn.ConnectionState().PeerCertificates
for i, cert := range certs {
if i == 0 {
continue
}
opts.Intermediates.AddCert(cert)
}
if _, err := certs[0].Verify(opts); err != nil {
if _, ok := err.(x509.UnknownAuthorityError); ok {
if trustUnknownHosts {
pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
if err != nil {
return nil, fmt.Errorf("error extracting public key from cert: %s", err)
}
pubKey.AddExtendedField("hosts", []string{addr})
if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
}
} else {
return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
}
}
}
testConn.Close()
tlsConfig.InsecureSkipVerify = false
return tlsConfig, nil
}
// joseBase64UrlEncode encodes the given data using the standard base64 url
// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
s = strings.Replace(s, "\n", "", -1)
s = strings.Replace(s, " ", "", -1)
switch len(s) % 4 {
case 0:
case 2:
s += "=="
case 3:
s += "="
default:
return nil, errors.New("illegal base64url string")
}
return base64.URLEncoding.DecodeString(s)
}
func keyIDEncode(b []byte) string {
s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
var buf bytes.Buffer
var i int
for i = 0; i < len(s)/4-1; i++ {
start := i * 4
end := start + 4
buf.WriteString(s[start:end] + ":")
}
buf.WriteString(s[i*4:])
return buf.String()
}
func keyIDFromCryptoKey(pubKey PublicKey) string {
// Generate and return a 'libtrust' fingerprint of the public key.
// For an RSA key this should be:
// SHA256(DER encoded ASN1)
// Then truncated to 240 bits and encoded into 12 base32 groups like so:
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
if err != nil {
return ""
}
hasher := crypto.SHA256.New()
hasher.Write(derBytes)
return keyIDEncode(hasher.Sum(nil)[:30])
}
func stringFromMap(m map[string]interface{}, key string) (string, error) {
val, ok := m[key]
if !ok {
return "", fmt.Errorf("%q value not specified", key)
}
str, ok := val.(string)
if !ok {
return "", fmt.Errorf("%q value must be a string", key)
}
delete(m, key)
return str, nil
}
func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
curveByteLen := (curve.Params().BitSize + 7) >> 3
cBytes, err := joseBase64UrlDecode(cB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
cByteLength := len(cBytes)
if cByteLength != curveByteLen {
return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
}
return new(big.Int).SetBytes(cBytes), nil
}
func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
dBytes, err := joseBase64UrlDecode(dB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
// octets (where n is the order of the curve). This is because the private
// key d must be in the interval [1, n-1] so the bitlength of d should be
// no larger than the bitlength of n-1. The easiest way to find the octet
// length is to take bitlength(n-1), add 7 to force a carry, and shift this
// bit sequence right by 3, which is essentially dividing by 8 and adding
// 1 if there is any remainder. Thus, the private key value d should be
// output to (bitlength(n-1)+7)>>3 octets.
n := curve.Params().N
octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
dByteLength := len(dBytes)
if dByteLength != octetLength {
return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
}
return new(big.Int).SetBytes(dBytes), nil
}
func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
nBytes, err := joseBase64UrlDecode(nB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
return new(big.Int).SetBytes(nBytes), nil
}
func serializeRSAPublicExponentParam(e int) []byte {
// We MUST use the minimum number of octets to represent E.
// E is supposed to be 65537 for performance and security reasons
// and is what golang's rsa package generates, but it might be
// different if imported from some other generator.
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(e))
var i int
for i = 0; i < 8; i++ {
if buf[i] != 0 {
break
}
}
return buf[i:]
}
func parseRSAPublicExponentParam(eB64Url string) (int, error) {
eBytes, err := joseBase64UrlDecode(eB64Url)
if err != nil {
return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
// Only the minimum number of bytes were used to represent E, but
// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
// to add zero padding if necessary.
byteLen := len(eBytes)
if byteLen > 4 {
return 0, fmt.Errorf("invalid public exponent: %d octets is too long", byteLen)
}
buf := make([]byte, 4-byteLen, 4)
eBytes = append(buf, eBytes...)
return int(binary.BigEndian.Uint32(eBytes)), nil
}
func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
b64Url, err := stringFromMap(m, key)
if err != nil {
return nil, err
}
paramBytes, err := joseBase64UrlDecode(b64Url)
if err != nil {
return nil, fmt.Errorf("invaled base64 URL encoding: %s", err)
}
return new(big.Int).SetBytes(paramBytes), nil
}
func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
for k, v := range headers {
switch val := v.(type) {
case string:
pemBlock.Headers[k] = val
case []string:
if k == "hosts" {
pemBlock.Headers[k] = strings.Join(val, ",")
} else {
// Unsupported []string header; surface an error instead of silently dropping it.
return nil, fmt.Errorf("unable to encode PEM header %q: unsupported []string value", k)
}
default:
// Non-encodable header type; surface an error instead of silently dropping it.
return nil, fmt.Errorf("unable to encode PEM header %q: non-encodable type %T", k, v)
}
}
return pemBlock, nil
}
func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
}
pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
if err != nil {
return nil, err
}
addPEMHeadersToKey(pemBlock, pubKey)
return pubKey, nil
}
func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
for key, value := range pemBlock.Headers {
var safeVal interface{}
if key == "hosts" {
safeVal = strings.Split(value, ",")
} else {
safeVal = value
}
pubKey.AddExtendedField(key, safeVal)
}
}
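joseBase64UrlEncode and joseBase64UrlDecode implement the unpadded base64url form required by JOSE by hand; the standard library's base64.RawURLEncoding (Go 1.5+) provides the same alphabet, so outside this package the round trip can be sketched with nothing libtrust-specific (the sample bytes are arbitrary):

package main

import (
	"encoding/base64"
	"fmt"
	"log"
)

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}

	// Unpadded base64url: the URL-safe alphabet with trailing '=' stripped,
	// which is the same form joseBase64UrlEncode produces.
	encoded := base64.RawURLEncoding.EncodeToString(raw)
	fmt.Println(encoded) // 3q2-7w

	decoded, err := base64.RawURLEncoding.DecodeString(encoded)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", decoded) // deadbeef
}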

View File

@@ -1,7 +0,0 @@
language: go
go:
- 1.0
- 1.1
- 1.2
- tip

View File

@@ -1,27 +0,0 @@
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,7 +0,0 @@
context
=======
[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
gorilla/context is a general purpose registry for global request variables.
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context

View File

@@ -1,143 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context
import (
"net/http"
"sync"
"time"
)
var (
mutex sync.RWMutex
data = make(map[*http.Request]map[interface{}]interface{})
datat = make(map[*http.Request]int64)
)
// Set stores a value for a given key in a given request.
func Set(r *http.Request, key, val interface{}) {
mutex.Lock()
if data[r] == nil {
data[r] = make(map[interface{}]interface{})
datat[r] = time.Now().Unix()
}
data[r][key] = val
mutex.Unlock()
}
// Get returns a value stored for a given key in a given request.
func Get(r *http.Request, key interface{}) interface{} {
mutex.RLock()
if ctx := data[r]; ctx != nil {
value := ctx[key]
mutex.RUnlock()
return value
}
mutex.RUnlock()
return nil
}
// GetOk returns the stored value and a presence flag, like the two-value form of a map access.
func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
mutex.RLock()
if _, ok := data[r]; ok {
value, ok := data[r][key]
mutex.RUnlock()
return value, ok
}
mutex.RUnlock()
return nil, false
}
// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
func GetAll(r *http.Request) map[interface{}]interface{} {
mutex.RLock()
if context, ok := data[r]; ok {
result := make(map[interface{}]interface{}, len(context))
for k, v := range context {
result[k] = v
}
mutex.RUnlock()
return result
}
mutex.RUnlock()
return nil
}
// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
// the request was registered.
func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
mutex.RLock()
context, ok := data[r]
result := make(map[interface{}]interface{}, len(context))
for k, v := range context {
result[k] = v
}
mutex.RUnlock()
return result, ok
}
// Delete removes a value stored for a given key in a given request.
func Delete(r *http.Request, key interface{}) {
mutex.Lock()
if data[r] != nil {
delete(data[r], key)
}
mutex.Unlock()
}
// Clear removes all values stored for a given request.
//
// This is usually called by a handler wrapper to clean up request
// variables at the end of a request lifetime. See ClearHandler().
func Clear(r *http.Request) {
mutex.Lock()
clear(r)
mutex.Unlock()
}
// clear is Clear without the lock.
func clear(r *http.Request) {
delete(data, r)
delete(datat, r)
}
// Purge removes request data stored for longer than maxAge, in seconds.
// It returns the number of requests removed.
//
// If maxAge <= 0, all request data is removed.
//
// This is only used as a sanity check: if context cleaning was not
// set up properly, some request data can be kept forever, consuming an
// increasing amount of memory. If this is detected, Purge() must be called
// periodically until the problem is fixed.
func Purge(maxAge int) int {
mutex.Lock()
count := 0
if maxAge <= 0 {
count = len(data)
data = make(map[*http.Request]map[interface{}]interface{})
datat = make(map[*http.Request]int64)
} else {
min := time.Now().Unix() - int64(maxAge)
for r := range data {
if datat[r] < min {
clear(r)
count++
}
}
}
mutex.Unlock()
return count
}
// ClearHandler wraps an http.Handler and clears request values at the end
// of a request lifetime.
func ClearHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer Clear(r)
h.ServeHTTP(w, r)
})
}

View File

@@ -1,82 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package context stores values shared during a request lifetime.
For example, a router can set variables extracted from the URL and later
application handlers can access those values, or it can be used to store
session values to be saved at the end of a request. There are several
other common uses.
The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
Here's the basic usage: first define the keys that you will need. The key
type is interface{} so a key can be of any type that supports equality.
Here we define a key using a custom int type to avoid name collisions:
package foo
import (
"github.com/gorilla/context"
)
type key int
const MyKey key = 0
Then set a variable. Variables are bound to an http.Request object, so you
need a request instance to set a value:
context.Set(r, MyKey, "bar")
The application can later access the variable using the same key you provided:
func MyHandler(w http.ResponseWriter, r *http.Request) {
// val is "bar".
val := context.Get(r, foo.MyKey)
// returns ("bar", true)
val, ok := context.GetOk(r, foo.MyKey)
// ...
}
And that's all about the basic usage. We discuss some other ideas below.
Any type can be stored in the context. To enforce a given type, make the key
private and wrap Get() and Set() to accept and return values of a specific
type:
type key int
const mykey key = 0
// GetMyKey returns a value for this package from the request values.
func GetMyKey(r *http.Request) SomeType {
if rv := context.Get(r, mykey); rv != nil {
return rv.(SomeType)
}
return nil
}
// SetMyKey sets a value for this package in the request values.
func SetMyKey(r *http.Request, val SomeType) {
context.Set(r, mykey, val)
}
Variables must be cleared at the end of a request, to remove all values
that were stored. This can be done in an http.Handler, after a request was
served. Just call Clear() passing the request:
context.Clear(r)
...or use ClearHandler(), which conveniently wraps an http.Handler to clear
variables at the end of a request lifetime.
The Routers from the gorilla/mux and gorilla/pat packages call Clear(),
so if you are using either of them you don't need to clear the context manually.
*/
package context
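The package comment above shows the pattern in fragments; stitched into a runnable handler chain it looks roughly like this (the key type, middleware, and user value are illustrative):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/context"
)

type ctxKey int

const userKey ctxKey = 0

func authMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Store a per-request value keyed by the request pointer.
		context.Set(r, userKey, "alice")
		next.ServeHTTP(w, r)
	})
}

func hello(w http.ResponseWriter, r *http.Request) {
	// Read the value back further down the handler chain.
	if user, ok := context.GetOk(r, userKey); ok {
		fmt.Fprintf(w, "hello, %s\n", user)
		return
	}
	http.Error(w, "no user in context", http.StatusInternalServerError)
}

func main() {
	// ClearHandler drops the request's values once the response is written,
	// which is what keeps the package-level data map from growing forever.
	h := context.ClearHandler(authMiddleware(http.HandlerFunc(hello)))
	log.Fatal(http.ListenAndServe(":8080", h))
}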

View File

@@ -1,7 +0,0 @@
language: go
go:
- 1.0
- 1.1
- 1.2
- tip

View File

@@ -1,27 +0,0 @@
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,7 +0,0 @@
mux
===
[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux)
gorilla/mux is a powerful URL router and dispatcher.
Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux

vendor/github.com/gorilla/mux/doc.go generated vendored
View File

@@ -1,199 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package gorilla/mux implements a request router and dispatcher.
The name mux stands for "HTTP request multiplexer". Like the standard
http.ServeMux, mux.Router matches incoming requests against a list of
registered routes and calls a handler for the route that matches the URL
or other conditions. The main features are:
* Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
* URL hosts and paths can have variables with an optional regular
expression.
* Registered URLs can be built, or "reversed", which helps maintain
references to resources.
* Routes can be used as subrouters: nested routes are only tested if the
parent route matches. This is useful to define groups of routes that
share common conditions like a host, a path prefix or other repeated
attributes. As a bonus, this optimizes request matching.
* It implements the http.Handler interface so it is compatible with the
standard http.ServeMux.
Let's start registering a couple of URL paths and handlers:
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
Here we register three routes mapping URL paths to handlers. This is
equivalent to how http.HandleFunc() works: if an incoming request URL matches
one of the paths, the corresponding handler is called passing
(http.ResponseWriter, *http.Request) as parameters.
Paths can have variables. They are defined using the format {name} or
{name:pattern}. If a regular expression pattern is not defined, the matched
variable will be anything until the next slash. For example:
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
The names are used to create a map of route variables which can be retrieved
calling mux.Vars():
vars := mux.Vars(request)
category := vars["category"]
And this is all you need to know about the basic usage. More advanced options
are explained below.
Routes can also be restricted to a domain or subdomain. Just define a host
pattern to be matched. They can also have variables:
r := mux.NewRouter()
// Only matches if domain is "www.domain.com".
r.Host("www.domain.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.domain.com")
There are several other matchers that can be added. To match path prefixes:
r.PathPrefix("/products/")
...or HTTP methods:
r.Methods("GET", "POST")
...or URL schemes:
r.Schemes("https")
...or header values:
r.Headers("X-Requested-With", "XMLHttpRequest")
...or query values:
r.Queries("key", "value")
...or to use a custom matcher function:
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
...and finally, it is possible to combine several matchers in a single route:
r.HandleFunc("/products", ProductsHandler).
Host("www.domain.com").
Methods("GET").
Schemes("http")
Setting the same matching conditions again and again can be boring, so we have
a way to group several routes that share the same requirements.
We call it "subrouting".
For example, let's say we have several URLs that should only match when the
host is "www.domain.com". Create a route for that host and get a "subrouter"
from it:
r := mux.NewRouter()
s := r.Host("www.domain.com").Subrouter()
Then register routes in the subrouter:
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
The three URL paths we registered above will only be tested if the domain is
"www.domain.com", because the subrouter is tested first. This is not
only convenient, but also optimizes request matching. You can create
subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define
subrouters in a central place and then parts of the app can register its
paths relatively to a given subrouter.
There's one more thing about subroutes. When a subrouter has a path prefix,
the inner routes use it as base for their paths:
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built,
or "reversed". We define a name calling Name() on a route. For example:
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
To build a URL, get the route and call the URL() method, passing a sequence of
key/value pairs for the route variables. For the previous route, we would do:
url, err := r.Get("article").URL("category", "technology", "id", "42")
...and the result will be a url.URL with the following path:
"/articles/technology/42"
This also works for host variables:
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
All variables defined in the route are required, and their values must
conform to the corresponding patterns. These requirements guarantee that a
generated URL will always match a registered route -- the only exception is
for explicitly defined "build-only" routes which never match.
There's also a way to build only the URL host or path for a route:
use the methods URLHost() or URLPath() instead. For the previous route,
we would do:
// "http://news.domain.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
And if you use subrouters, host and path defined separately can be built
as well:
r := mux.NewRouter()
s := r.Host("{subdomain}.domain.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
*/
package mux
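And a minimal compilable version of the routing and URL-building examples above (handler body, route name, and port are illustrative):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// {id:[0-9]+} only matches numeric IDs; anything else falls through to the 404 handler.
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req)
		fmt.Fprintf(w, "category=%s id=%s\n", vars["category"], vars["id"])
	}).Name("article")

	// Named routes can be reversed into URLs.
	if u, err := r.Get("article").URL("category", "technology", "id", "42"); err == nil {
		log.Println("article route builds to", u.String()) // /articles/technology/42
	}

	log.Fatal(http.ListenAndServe(":8080", r))
}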

vendor/github.com/gorilla/mux/mux.go generated vendored
View File

@@ -1,353 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"fmt"
"net/http"
"path"
"github.com/gorilla/context"
)
// NewRouter returns a new router instance.
func NewRouter() *Router {
return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
}
// Router registers routes to be matched and dispatches a handler.
//
// It implements the http.Handler interface, so it can be registered to serve
// requests:
//
// var router = mux.NewRouter()
//
// func main() {
// http.Handle("/", router)
// }
//
// Or, for Google App Engine, register it in an init() function:
//
// func init() {
// http.Handle("/", router)
// }
//
// This will send all incoming requests to the router.
type Router struct {
// Configurable Handler to be used when no route matches.
NotFoundHandler http.Handler
// Parent route, if this is a subrouter.
parent parentRoute
// Routes to be matched, in order.
routes []*Route
// Routes by name for URL building.
namedRoutes map[string]*Route
// See Router.StrictSlash(). This defines the flag for new routes.
strictSlash bool
// If true, do not clear the request context after handling the request
KeepContext bool
}
// Match matches registered routes against the request.
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
for _, route := range r.routes {
if route.Match(req, match) {
return true
}
}
return false
}
// ServeHTTP dispatches the handler registered in the matched route.
//
// When there is a match, the route variables can be retrieved calling
// mux.Vars(request).
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// Clean path to canonical form and redirect.
if p := cleanPath(req.URL.Path); p != req.URL.Path {
// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
// http://code.google.com/p/go/issues/detail?id=5252
url := *req.URL
url.Path = p
p = url.String()
w.Header().Set("Location", p)
w.WriteHeader(http.StatusMovedPermanently)
return
}
var match RouteMatch
var handler http.Handler
if r.Match(req, &match) {
handler = match.Handler
setVars(req, match.Vars)
setCurrentRoute(req, match.Route)
}
if handler == nil {
handler = r.NotFoundHandler
if handler == nil {
handler = http.NotFoundHandler()
}
}
if !r.KeepContext {
defer context.Clear(req)
}
handler.ServeHTTP(w, req)
}
// Get returns a route registered with the given name.
func (r *Router) Get(name string) *Route {
return r.getNamedRoutes()[name]
}
// GetRoute returns a route registered with the given name. This method
// was renamed to Get() and remains here for backwards compatibility.
func (r *Router) GetRoute(name string) *Route {
return r.getNamedRoutes()[name]
}
// StrictSlash defines the trailing slash behavior for new routes. The initial
// value is false.
//
// When true, if the route path is "/path/", accessing "/path" will redirect
// to the former and vice versa. In other words, your application will always
// see the path as specified in the route.
//
// When false, if the route path is "/path", accessing "/path/" will not match
// this route and vice versa.
//
// Special case: when a route sets a path prefix using the PathPrefix() method,
// strict slash is ignored for that route because the redirect behavior can't
// be determined from a prefix alone. However, any subrouters created from that
// route inherit the original StrictSlash setting.
func (r *Router) StrictSlash(value bool) *Router {
r.strictSlash = value
return r
}
// ----------------------------------------------------------------------------
// parentRoute
// ----------------------------------------------------------------------------
// getNamedRoutes returns the map where named routes are registered.
func (r *Router) getNamedRoutes() map[string]*Route {
if r.namedRoutes == nil {
if r.parent != nil {
r.namedRoutes = r.parent.getNamedRoutes()
} else {
r.namedRoutes = make(map[string]*Route)
}
}
return r.namedRoutes
}
// getRegexpGroup returns regexp definitions from the parent route, if any.
func (r *Router) getRegexpGroup() *routeRegexpGroup {
if r.parent != nil {
return r.parent.getRegexpGroup()
}
return nil
}
// ----------------------------------------------------------------------------
// Route factories
// ----------------------------------------------------------------------------
// NewRoute registers an empty route.
func (r *Router) NewRoute() *Route {
route := &Route{parent: r, strictSlash: r.strictSlash}
r.routes = append(r.routes, route)
return route
}
// Handle registers a new route with a matcher for the URL path.
// See Route.Path() and Route.Handler().
func (r *Router) Handle(path string, handler http.Handler) *Route {
return r.NewRoute().Path(path).Handler(handler)
}
// HandleFunc registers a new route with a matcher for the URL path.
// See Route.Path() and Route.HandlerFunc().
func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
*http.Request)) *Route {
return r.NewRoute().Path(path).HandlerFunc(f)
}
// Headers registers a new route with a matcher for request header values.
// See Route.Headers().
func (r *Router) Headers(pairs ...string) *Route {
return r.NewRoute().Headers(pairs...)
}
// Host registers a new route with a matcher for the URL host.
// See Route.Host().
func (r *Router) Host(tpl string) *Route {
return r.NewRoute().Host(tpl)
}
// MatcherFunc registers a new route with a custom matcher function.
// See Route.MatcherFunc().
func (r *Router) MatcherFunc(f MatcherFunc) *Route {
return r.NewRoute().MatcherFunc(f)
}
// Methods registers a new route with a matcher for HTTP methods.
// See Route.Methods().
func (r *Router) Methods(methods ...string) *Route {
return r.NewRoute().Methods(methods...)
}
// Path registers a new route with a matcher for the URL path.
// See Route.Path().
func (r *Router) Path(tpl string) *Route {
return r.NewRoute().Path(tpl)
}
// PathPrefix registers a new route with a matcher for the URL path prefix.
// See Route.PathPrefix().
func (r *Router) PathPrefix(tpl string) *Route {
return r.NewRoute().PathPrefix(tpl)
}
// Queries registers a new route with a matcher for URL query values.
// See Route.Queries().
func (r *Router) Queries(pairs ...string) *Route {
return r.NewRoute().Queries(pairs...)
}
// Schemes registers a new route with a matcher for URL schemes.
// See Route.Schemes().
func (r *Router) Schemes(schemes ...string) *Route {
return r.NewRoute().Schemes(schemes...)
}
// ----------------------------------------------------------------------------
// Context
// ----------------------------------------------------------------------------
// RouteMatch stores information about a matched route.
type RouteMatch struct {
Route *Route
Handler http.Handler
Vars map[string]string
}
type contextKey int
const (
varsKey contextKey = iota
routeKey
)
// Vars returns the route variables for the current request, if any.
func Vars(r *http.Request) map[string]string {
if rv := context.Get(r, varsKey); rv != nil {
return rv.(map[string]string)
}
return nil
}
// CurrentRoute returns the matched route for the current request, if any.
func CurrentRoute(r *http.Request) *Route {
if rv := context.Get(r, routeKey); rv != nil {
return rv.(*Route)
}
return nil
}
func setVars(r *http.Request, val interface{}) {
context.Set(r, varsKey, val)
}
func setCurrentRoute(r *http.Request, val interface{}) {
context.Set(r, routeKey, val)
}
// ----------------------------------------------------------------------------
// Helpers
// ----------------------------------------------------------------------------
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// Borrowed from the net/http package.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
// uniqueVars returns an error if two slices contain duplicated strings.
func uniqueVars(s1, s2 []string) error {
for _, v1 := range s1 {
for _, v2 := range s2 {
if v1 == v2 {
return fmt.Errorf("mux: duplicated route variable %q", v2)
}
}
}
return nil
}
// mapFromPairs converts variadic string parameters to a string map.
func mapFromPairs(pairs ...string) (map[string]string, error) {
length := len(pairs)
if length%2 != 0 {
return nil, fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
}
m := make(map[string]string, length/2)
for i := 0; i < length; i += 2 {
m[pairs[i]] = pairs[i+1]
}
return m, nil
}
// matchInArray returns true if the given string value is in the array.
func matchInArray(arr []string, value string) bool {
for _, v := range arr {
if v == value {
return true
}
}
return false
}
// matchMap returns true if the given key/value pairs exist in a given map.
func matchMap(toCheck map[string]string, toMatch map[string][]string,
canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != "" {
// If value was defined as an empty string we only check that the
// key exists. Otherwise we also check for equality.
valueExists := false
for _, value := range values {
if v == value {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}
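
The mux.go removed above is the core Router implementation. For reference, here is a minimal sketch of how this API is typically wired up; the handler, route template, and listen address are illustrative only and not taken from this repository:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

// articleHandler is a placeholder handler used only for this sketch.
func articleHandler(w http.ResponseWriter, r *http.Request) {
	// mux.Vars returns the variables captured by the matched route.
	vars := mux.Vars(r)
	fmt.Fprintf(w, "category=%s id=%s\n", vars["category"], vars["id"])
}

func main() {
	r := mux.NewRouter()
	// {id:[0-9]+} constrains the variable with an inline regexp.
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", articleHandler)
	// Router implements http.Handler, so it can be passed straight to the server.
	http.ListenAndServe(":8080", r)
}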

View File

@@ -1,276 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"bytes"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
)
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
// It will extract named variables, assemble a regexp to be matched, create
// a "reverse" template to build URLs and compile regexps to validate variable
// values used in URL building.
//
// Previously we accepted only Python-like identifiers for variable
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
// name and pattern can't be empty, and names can't contain a colon.
func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
// Check if it is well-formed.
idxs, errBraces := braceIndices(tpl)
if errBraces != nil {
return nil, errBraces
}
// Backup the original.
template := tpl
// Now let's parse it.
defaultPattern := "[^/]+"
if matchQuery {
defaultPattern = "[^?&]+"
matchPrefix = true
} else if matchHost {
defaultPattern = "[^.]+"
matchPrefix = false
}
	// Strict slash only applies when matching a full path, not a prefix, host, or query.
if matchPrefix || matchHost || matchQuery {
strictSlash = false
}
// Set a flag for strictSlash.
endSlash := false
if strictSlash && strings.HasSuffix(tpl, "/") {
tpl = tpl[:len(tpl)-1]
endSlash = true
}
varsN := make([]string, len(idxs)/2)
varsR := make([]*regexp.Regexp, len(idxs)/2)
pattern := bytes.NewBufferString("")
if !matchQuery {
pattern.WriteByte('^')
}
reverse := bytes.NewBufferString("")
var end int
var err error
for i := 0; i < len(idxs); i += 2 {
// Set all values we are interested in.
raw := tpl[end:idxs[i]]
end = idxs[i+1]
parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
name := parts[0]
patt := defaultPattern
if len(parts) == 2 {
patt = parts[1]
}
// Name or pattern can't be empty.
if name == "" || patt == "" {
return nil, fmt.Errorf("mux: missing name or pattern in %q",
tpl[idxs[i]:end])
}
// Build the regexp pattern.
fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt)
// Build the reverse template.
fmt.Fprintf(reverse, "%s%%s", raw)
// Append variable name and compiled pattern.
varsN[i/2] = name
varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
if err != nil {
return nil, err
}
}
// Add the remaining.
raw := tpl[end:]
pattern.WriteString(regexp.QuoteMeta(raw))
if strictSlash {
pattern.WriteString("[/]?")
}
if !matchPrefix {
pattern.WriteByte('$')
}
reverse.WriteString(raw)
if endSlash {
reverse.WriteByte('/')
}
// Compile full regexp.
reg, errCompile := regexp.Compile(pattern.String())
if errCompile != nil {
return nil, errCompile
}
// Done!
return &routeRegexp{
template: template,
matchHost: matchHost,
matchQuery: matchQuery,
strictSlash: strictSlash,
regexp: reg,
reverse: reverse.String(),
varsN: varsN,
varsR: varsR,
}, nil
}
// routeRegexp stores a regexp to match a host or path and information to
// collect and validate route variables.
type routeRegexp struct {
// The unmodified template.
template string
// True for host match, false for path or query string match.
matchHost bool
// True for query string match, false for path and host match.
matchQuery bool
// The strictSlash value defined on the route, but disabled if PathPrefix was used.
strictSlash bool
// Expanded regexp.
regexp *regexp.Regexp
// Reverse template.
reverse string
// Variable names.
varsN []string
// Variable regexps (validators).
varsR []*regexp.Regexp
}
// Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
if !r.matchHost {
if r.matchQuery {
return r.regexp.MatchString(req.URL.RawQuery)
} else {
return r.regexp.MatchString(req.URL.Path)
}
}
return r.regexp.MatchString(getHost(req))
}
// url builds a URL part using the given values.
func (r *routeRegexp) url(pairs ...string) (string, error) {
values, err := mapFromPairs(pairs...)
if err != nil {
return "", err
}
urlValues := make([]interface{}, len(r.varsN))
for k, v := range r.varsN {
value, ok := values[v]
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
urlValues[k] = value
}
rv := fmt.Sprintf(r.reverse, urlValues...)
if !r.regexp.MatchString(rv) {
// The URL is checked against the full regexp, instead of checking
// individual variables. This is faster but to provide a good error
// message, we check individual regexps if the URL doesn't match.
for k, v := range r.varsN {
if !r.varsR[k].MatchString(values[v]) {
return "", fmt.Errorf(
"mux: variable %q doesn't match, expected %q", values[v],
r.varsR[k].String())
}
}
}
return rv, nil
}
// braceIndices returns the first level curly brace indices from a string.
// It returns an error in case of unbalanced braces.
func braceIndices(s string) ([]int, error) {
var level, idx int
idxs := make([]int, 0)
for i := 0; i < len(s); i++ {
switch s[i] {
case '{':
if level++; level == 1 {
idx = i
}
case '}':
if level--; level == 0 {
idxs = append(idxs, idx, i+1)
} else if level < 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
}
}
if level != 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
return idxs, nil
}
// ----------------------------------------------------------------------------
// routeRegexpGroup
// ----------------------------------------------------------------------------
// routeRegexpGroup groups the route matchers that carry variables.
type routeRegexpGroup struct {
host *routeRegexp
path *routeRegexp
queries []*routeRegexp
}
// setMatch extracts the variables from the URL once a route matches.
func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
// Store host variables.
if v.host != nil {
hostVars := v.host.regexp.FindStringSubmatch(getHost(req))
if hostVars != nil {
for k, v := range v.host.varsN {
m.Vars[v] = hostVars[k+1]
}
}
}
// Store path variables.
if v.path != nil {
pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path)
if pathVars != nil {
for k, v := range v.path.varsN {
m.Vars[v] = pathVars[k+1]
}
// Check if we should redirect.
if v.path.strictSlash {
p1 := strings.HasSuffix(req.URL.Path, "/")
p2 := strings.HasSuffix(v.path.template, "/")
if p1 != p2 {
u, _ := url.Parse(req.URL.String())
if p1 {
u.Path = u.Path[:len(u.Path)-1]
} else {
u.Path += "/"
}
m.Handler = http.RedirectHandler(u.String(), 301)
}
}
}
}
// Store query string variables.
rawQuery := req.URL.RawQuery
for _, q := range v.queries {
queryVars := q.regexp.FindStringSubmatch(rawQuery)
if queryVars != nil {
for k, v := range q.varsN {
m.Vars[v] = queryVars[k+1]
}
}
}
}
// getHost tries its best to return the request host.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
host := r.Host
// Slice off any port information.
if i := strings.Index(host, ":"); i != -1 {
host = host[:i]
}
return host
}
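
The regexp.go removed above is what compiles {name} and {name:pattern} placeholders into host, path, and query matchers. A small sketch of the template forms it handles; the domain, paths, and handler below are made up for illustration:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

// show dumps whatever the host, path, and query regexps captured.
func show(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, mux.Vars(r))
}

func main() {
	r := mux.NewRouter()

	// Host variables default to the pattern [^.]+; an explicit pattern
	// such as [a-z]+ narrows what the variable may match.
	r.Host("{subdomain:[a-z]+}.example.com").
		Path("/files/{name}").
		HandlerFunc(show)

	// Query variables default to the pattern [^?&]+ unless overridden.
	r.Path("/search").
		Queries("q", "{q}", "page", "{page:[0-9]+}").
		HandlerFunc(show)

	http.ListenAndServe(":8080", r)
}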

View File

@@ -1,524 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"errors"
"fmt"
"net/http"
"net/url"
"strings"
)
// Route stores information to match a request and build URLs.
type Route struct {
// Parent where the route was registered (a Router).
parent parentRoute
// Request handler for the route.
handler http.Handler
// List of matchers.
matchers []matcher
// Manager for the variables from host and path.
regexp *routeRegexpGroup
// If true, when the path pattern is "/path/", accessing "/path" will
// redirect to the former and vice versa.
strictSlash bool
// If true, this route never matches: it is only used to build URLs.
buildOnly bool
// The name used to build URLs.
name string
	// Error that resulted from building the route.
err error
}
// Match matches the route against the request.
func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if r.buildOnly || r.err != nil {
return false
}
// Match everything.
for _, m := range r.matchers {
if matched := m.Match(req, match); !matched {
return false
}
}
// Yay, we have a match. Let's collect some info about it.
if match.Route == nil {
match.Route = r
}
if match.Handler == nil {
match.Handler = r.handler
}
if match.Vars == nil {
match.Vars = make(map[string]string)
}
// Set variables.
if r.regexp != nil {
r.regexp.setMatch(req, match, r)
}
return true
}
// ----------------------------------------------------------------------------
// Route attributes
// ----------------------------------------------------------------------------
// GetError returns the error that resulted from building the route, if any.
func (r *Route) GetError() error {
return r.err
}
// BuildOnly sets the route to never match: it is only used to build URLs.
func (r *Route) BuildOnly() *Route {
r.buildOnly = true
return r
}
// Handler --------------------------------------------------------------------
// Handler sets a handler for the route.
func (r *Route) Handler(handler http.Handler) *Route {
if r.err == nil {
r.handler = handler
}
return r
}
// HandlerFunc sets a handler function for the route.
func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
return r.Handler(http.HandlerFunc(f))
}
// GetHandler returns the handler for the route, if any.
func (r *Route) GetHandler() http.Handler {
return r.handler
}
// Name -----------------------------------------------------------------------
// Name sets the name for the route, used to build URLs.
// If the name was registered already it will be overwritten.
func (r *Route) Name(name string) *Route {
if r.name != "" {
r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
r.name, name)
}
if r.err == nil {
r.name = name
r.getNamedRoutes()[name] = r
}
return r
}
// GetName returns the name for the route, if any.
func (r *Route) GetName() string {
return r.name
}
// ----------------------------------------------------------------------------
// Matchers
// ----------------------------------------------------------------------------
// matcher types try to match a request.
type matcher interface {
Match(*http.Request, *RouteMatch) bool
}
// addMatcher adds a matcher to the route.
func (r *Route) addMatcher(m matcher) *Route {
if r.err == nil {
r.matchers = append(r.matchers, m)
}
return r
}
// addRegexpMatcher adds a host or path matcher and builder to a route.
func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
if r.err != nil {
return r.err
}
r.regexp = r.getRegexpGroup()
if !matchHost && !matchQuery {
if len(tpl) == 0 || tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
}
if r.regexp.path != nil {
tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
}
}
rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
if err != nil {
return err
}
for _, q := range r.regexp.queries {
if err = uniqueVars(rr.varsN, q.varsN); err != nil {
return err
}
}
if matchHost {
if r.regexp.path != nil {
if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
return err
}
}
r.regexp.host = rr
} else {
if r.regexp.host != nil {
if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
return err
}
}
if matchQuery {
r.regexp.queries = append(r.regexp.queries, rr)
} else {
r.regexp.path = rr
}
}
r.addMatcher(rr)
return nil
}
// Headers --------------------------------------------------------------------
// headerMatcher matches the request against header values.
type headerMatcher map[string]string
func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMap(m, r.Header, true)
}
// Headers adds a matcher for request header values.
// It accepts a sequence of key/value pairs to be matched. For example:
//
// r := mux.NewRouter()
// r.Headers("Content-Type", "application/json",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request header values match.
//
// If the value is an empty string, it will match any value if the key is set.
func (r *Route) Headers(pairs ...string) *Route {
if r.err == nil {
var headers map[string]string
headers, r.err = mapFromPairs(pairs...)
return r.addMatcher(headerMatcher(headers))
}
return r
}
// Host -----------------------------------------------------------------------
// Host adds a matcher for the URL host.
// It accepts a template with zero or more URL variables enclosed by {}.
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next dot.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter()
// r.Host("www.domain.com")
// r.Host("{subdomain}.domain.com")
// r.Host("{subdomain:[a-z]+}.domain.com")
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Host(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, true, false, false)
return r
}
// MatcherFunc ----------------------------------------------------------------
// MatcherFunc is the function signature used by custom matchers.
type MatcherFunc func(*http.Request, *RouteMatch) bool
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
return m(r, match)
}
// MatcherFunc adds a custom function to be used as request matcher.
func (r *Route) MatcherFunc(f MatcherFunc) *Route {
return r.addMatcher(f)
}
// Methods --------------------------------------------------------------------
// methodMatcher matches the request against HTTP methods.
type methodMatcher []string
func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchInArray(m, r.Method)
}
// Methods adds a matcher for HTTP methods.
// It accepts a sequence of one or more methods to be matched, e.g.:
// "GET", "POST", "PUT".
func (r *Route) Methods(methods ...string) *Route {
for k, v := range methods {
methods[k] = strings.ToUpper(v)
}
return r.addMatcher(methodMatcher(methods))
}
// Path -----------------------------------------------------------------------
// Path adds a matcher for the URL path.
// It accepts a template with zero or more URL variables enclosed by {}. The
// template must start with a "/".
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter()
// r.Path("/products/").Handler(ProductsHandler)
// r.Path("/products/{key}").Handler(ProductsHandler)
// r.Path("/articles/{category}/{id:[0-9]+}").
// Handler(ArticleHandler)
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Path(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, false, false, false)
return r
}
// PathPrefix -----------------------------------------------------------------
// PathPrefix adds a matcher for the URL path prefix. This matches if the given
// template is a prefix of the full URL path. See Route.Path() for details on
// the tpl argument.
//
// Note that it does not treat slashes specially ("/foobar/" will be matched by
// the prefix "/foo") so you may want to use a trailing slash here.
//
// Also note that the setting of Router.StrictSlash() has no effect on routes
// with a PathPrefix matcher.
func (r *Route) PathPrefix(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, false, true, false)
return r
}
// Query ----------------------------------------------------------------------
// Queries adds a matcher for URL query values.
// It accepts a sequence of key/value pairs. Values may define variables.
// For example:
//
// r := mux.NewRouter()
// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
//
// The above route will only match if the URL contains the defined query
// values, e.g.: ?foo=bar&id=42.
//
// If the value is an empty string, it will match any value if the key is set.
//
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
func (r *Route) Queries(pairs ...string) *Route {
length := len(pairs)
if length%2 != 0 {
r.err = fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
return nil
}
for i := 0; i < length; i += 2 {
if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil {
return r
}
}
return r
}
// Schemes --------------------------------------------------------------------
// schemeMatcher matches the request against URL schemes.
type schemeMatcher []string
func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchInArray(m, r.URL.Scheme)
}
// Schemes adds a matcher for URL schemes.
// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
func (r *Route) Schemes(schemes ...string) *Route {
for k, v := range schemes {
schemes[k] = strings.ToLower(v)
}
return r.addMatcher(schemeMatcher(schemes))
}
// Subrouter ------------------------------------------------------------------
// Subrouter creates a subrouter for the route.
//
// It will test the inner routes only if the parent route matched. For example:
//
// r := mux.NewRouter()
// s := r.Host("www.domain.com").Subrouter()
// s.HandleFunc("/products/", ProductsHandler)
// s.HandleFunc("/products/{key}", ProductHandler)
// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
//
// Here, the routes registered in the subrouter won't be tested if the host
// doesn't match.
func (r *Route) Subrouter() *Router {
router := &Router{parent: r, strictSlash: r.strictSlash}
r.addMatcher(router)
return router
}
// ----------------------------------------------------------------------------
// URL building
// ----------------------------------------------------------------------------
// URL builds a URL for the route.
//
// It accepts a sequence of key/value pairs for the route variables. For
// example, given this route:
//
// r := mux.NewRouter()
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Name("article")
//
// ...a URL for it can be built using:
//
// url, err := r.Get("article").URL("category", "technology", "id", "42")
//
// ...which will return a url.URL with the following path:
//
// "/articles/technology/42"
//
// This also works for host variables:
//
// r := mux.NewRouter()
// r.Host("{subdomain}.domain.com").
// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Name("article")
//
// // url.String() will be "http://news.domain.com/articles/technology/42"
// url, err := r.Get("article").URL("subdomain", "news",
// "category", "technology",
// "id", "42")
//
// All variables defined in the route are required, and their values must
// conform to the corresponding patterns.
func (r *Route) URL(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp == nil {
return nil, errors.New("mux: route doesn't have a host or path")
}
var scheme, host, path string
var err error
if r.regexp.host != nil {
// Set a default scheme.
scheme = "http"
if host, err = r.regexp.host.url(pairs...); err != nil {
return nil, err
}
}
if r.regexp.path != nil {
if path, err = r.regexp.path.url(pairs...); err != nil {
return nil, err
}
}
return &url.URL{
Scheme: scheme,
Host: host,
Path: path,
}, nil
}
// URLHost builds the host part of the URL for a route. See Route.URL().
//
// The route must have a host defined.
func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp == nil || r.regexp.host == nil {
return nil, errors.New("mux: route doesn't have a host")
}
host, err := r.regexp.host.url(pairs...)
if err != nil {
return nil, err
}
return &url.URL{
Scheme: "http",
Host: host,
}, nil
}
// URLPath builds the path part of the URL for a route. See Route.URL().
//
// The route must have a path defined.
func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp == nil || r.regexp.path == nil {
return nil, errors.New("mux: route doesn't have a path")
}
path, err := r.regexp.path.url(pairs...)
if err != nil {
return nil, err
}
return &url.URL{
Path: path,
}, nil
}
// ----------------------------------------------------------------------------
// parentRoute
// ----------------------------------------------------------------------------
// parentRoute allows routes to know about parent host and path definitions.
type parentRoute interface {
getNamedRoutes() map[string]*Route
getRegexpGroup() *routeRegexpGroup
}
// getNamedRoutes returns the map where named routes are registered.
func (r *Route) getNamedRoutes() map[string]*Route {
if r.parent == nil {
// During tests router is not always set.
r.parent = NewRouter()
}
return r.parent.getNamedRoutes()
}
// getRegexpGroup returns regexp definitions from this route.
func (r *Route) getRegexpGroup() *routeRegexpGroup {
if r.regexp == nil {
if r.parent == nil {
// During tests router is not always set.
r.parent = NewRouter()
}
regexp := r.parent.getRegexpGroup()
if regexp == nil {
r.regexp = new(routeRegexpGroup)
} else {
// Copy.
r.regexp = &routeRegexpGroup{
host: regexp.host,
path: regexp.path,
queries: regexp.queries,
}
}
}
return r.regexp
}
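
The route.go removed above also carries the reverse-routing side of the package: named routes can rebuild URLs from their variables. A sketch under the assumption of a made-up domain and an empty handler:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Host("{subdomain}.example.com").
		Path("/articles/{category}/{id:[0-9]+}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}).
		Name("article")

	// URL requires every route variable and validates each against its pattern.
	u, err := r.Get("article").URL(
		"subdomain", "news",
		"category", "technology",
		"id", "42",
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // http://news.example.com/articles/technology/42

	// URLPath builds only the path part (URLHost would build only the host).
	p, _ := r.Get("article").URLPath("category", "technology", "id", "42")
	fmt.Println(p.Path) // /articles/technology/42
}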

View File

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,2 +0,0 @@
Tianon Gravi <admwiggin@gmail.com> (@tianon)
Aleksa Sarai <cyphar@cyphar.com> (@cyphar)

View File

@@ -1,108 +0,0 @@
package user
import (
"errors"
"fmt"
"syscall"
)
var (
// The current operating system does not provide the required data for user lookups.
ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
)
func lookupUser(filter func(u User) bool) (User, error) {
// Get operating system-specific passwd reader-closer.
passwd, err := GetPasswd()
if err != nil {
return User{}, err
}
defer passwd.Close()
// Get the users.
users, err := ParsePasswdFilter(passwd, filter)
if err != nil {
return User{}, err
}
// No user entries found.
if len(users) == 0 {
return User{}, fmt.Errorf("no matching entries in passwd file")
}
// Assume the first entry is the "correct" one.
return users[0], nil
}
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
return LookupUid(syscall.Getuid())
}
// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (User, error) {
return lookupUser(func(u User) bool {
return u.Name == username
})
}
// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupUid
// returns an error.
func LookupUid(uid int) (User, error) {
return lookupUser(func(u User) bool {
return u.Uid == uid
})
}
func lookupGroup(filter func(g Group) bool) (Group, error) {
// Get operating system-specific group reader-closer.
group, err := GetGroup()
if err != nil {
return Group{}, err
}
defer group.Close()
	// Get the groups.
groups, err := ParseGroupFilter(group, filter)
if err != nil {
return Group{}, err
}
	// No group entries found.
if len(groups) == 0 {
return Group{}, fmt.Errorf("no matching entries in group file")
}
// Assume the first entry is the "correct" one.
return groups[0], nil
}
// CurrentGroup looks up the group matching the current process's primary
// group id in /etc/group. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
return LookupGid(syscall.Getgid())
}
// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (Group, error) {
return lookupGroup(func(g Group) bool {
return g.Name == groupname
})
}
// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (Group, error) {
return lookupGroup(func(g Group) bool {
return g.Gid == gid
})
}
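
The lookup.go removed above provides the high-level lookups on top of the passwd/group parsers. A minimal sketch of its use, assuming the libcontainer import path github.com/opencontainers/runc/libcontainer/user and a host with a conventional /etc/passwd:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Look up by name and by numeric uid; both read /etc/passwd.
	if u, err := user.LookupUser("root"); err == nil {
		fmt.Printf("root: uid=%d gid=%d home=%s\n", u.Uid, u.Gid, u.Home)
	}
	if u, err := user.LookupUid(0); err == nil {
		fmt.Printf("uid 0 belongs to %q\n", u.Name)
	}

	// CurrentUser resolves the uid of the running process.
	if u, err := user.CurrentUser(); err == nil {
		fmt.Println("running as", u.Name)
	}
}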

View File

@@ -1,30 +0,0 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package user
import (
"io"
"os"
)
// Unix-specific path to the passwd and group formatted files.
const (
unixPasswdPath = "/etc/passwd"
unixGroupPath = "/etc/group"
)
func GetPasswdPath() (string, error) {
return unixPasswdPath, nil
}
func GetPasswd() (io.ReadCloser, error) {
return os.Open(unixPasswdPath)
}
func GetGroupPath() (string, error) {
return unixGroupPath, nil
}
func GetGroup() (io.ReadCloser, error) {
return os.Open(unixGroupPath)
}

View File

@@ -1,21 +0,0 @@
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package user
import "io"
func GetPasswdPath() (string, error) {
return "", ErrUnsupported
}
func GetPasswd() (io.ReadCloser, error) {
return nil, ErrUnsupported
}
func GetGroupPath() (string, error) {
return "", ErrUnsupported
}
func GetGroup() (io.ReadCloser, error) {
return nil, ErrUnsupported
}

View File

@@ -1,418 +0,0 @@
package user
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
const (
minId = 0
maxId = 1<<31 - 1 //for 32-bit systems compatibility
)
var (
ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId)
)
type User struct {
Name string
Pass string
Uid int
Gid int
Gecos string
Home string
Shell string
}
type Group struct {
Name string
Pass string
Gid int
List []string
}
func parseLine(line string, v ...interface{}) {
if line == "" {
return
}
parts := strings.Split(line, ":")
for i, p := range parts {
if len(v) <= i {
// if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files
break
}
switch e := v[i].(type) {
case *string:
// "root", "adm", "/bin/bash"
*e = p
case *int:
// "0", "4", "1000"
// ignore string to int conversion errors, for great "tolerance" of naughty configuration files
*e, _ = strconv.Atoi(p)
case *[]string:
// "", "root", "root,adm,daemon"
if p != "" {
*e = strings.Split(p, ",")
} else {
*e = []string{}
}
default:
// panic, because this is a programming/logic error, not a runtime one
panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!")
}
}
}
func ParsePasswdFile(path string) ([]User, error) {
passwd, err := os.Open(path)
if err != nil {
return nil, err
}
defer passwd.Close()
return ParsePasswd(passwd)
}
func ParsePasswd(passwd io.Reader) ([]User, error) {
return ParsePasswdFilter(passwd, nil)
}
func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
passwd, err := os.Open(path)
if err != nil {
return nil, err
}
defer passwd.Close()
return ParsePasswdFilter(passwd, filter)
}
func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
if r == nil {
return nil, fmt.Errorf("nil source for passwd-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []User{}
)
for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
text := strings.TrimSpace(s.Text())
if text == "" {
continue
}
// see: man 5 passwd
// name:password:UID:GID:GECOS:directory:shell
// Name:Pass:Uid:Gid:Gecos:Home:Shell
// root:x:0:0:root:/root:/bin/bash
// adm:x:3:4:adm:/var/adm:/bin/false
p := User{}
parseLine(
text,
&p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell,
)
if filter == nil || filter(p) {
out = append(out, p)
}
}
return out, nil
}
func ParseGroupFile(path string) ([]Group, error) {
group, err := os.Open(path)
if err != nil {
return nil, err
}
defer group.Close()
return ParseGroup(group)
}
func ParseGroup(group io.Reader) ([]Group, error) {
return ParseGroupFilter(group, nil)
}
func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
group, err := os.Open(path)
if err != nil {
return nil, err
}
defer group.Close()
return ParseGroupFilter(group, filter)
}
func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
if r == nil {
return nil, fmt.Errorf("nil source for group-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []Group{}
)
for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
text := s.Text()
if text == "" {
continue
}
// see: man 5 group
// group_name:password:GID:user_list
// Name:Pass:Gid:List
// root:x:0:root
// adm:x:4:root,adm,daemon
p := Group{}
parseLine(
text,
&p.Name, &p.Pass, &p.Gid, &p.List,
)
if filter == nil || filter(p) {
out = append(out, p)
}
}
return out, nil
}
type ExecUser struct {
Uid, Gid int
Sgids []int
Home string
}
// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
// given file paths and uses that data as the arguments to GetExecUser. If the
// files cannot be opened for any reason, the error is ignored and a nil
// io.Reader is passed instead.
func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
passwd, err := os.Open(passwdPath)
if err != nil {
passwd = nil
} else {
defer passwd.Close()
}
group, err := os.Open(groupPath)
if err != nil {
group = nil
} else {
defer group.Close()
}
return GetExecUser(userSpec, defaults, passwd, group)
}
// GetExecUser parses a user specification string (using the passwd and group
// readers as sources for /etc/passwd and /etc/group data, respectively). In
// the case of blank fields or missing data from the sources, the values in
// defaults are used.
//
// GetExecUser will return an error if a user or group literal could not be
// found in any entry in passwd and group respectively.
//
// Examples of valid user specifications are:
// * ""
// * "user"
// * "uid"
// * "user:group"
// * "uid:gid
// * "user:gid"
// * "uid:group"
func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
var (
userArg, groupArg string
name string
)
if defaults == nil {
defaults = new(ExecUser)
}
// Copy over defaults.
user := &ExecUser{
Uid: defaults.Uid,
Gid: defaults.Gid,
Sgids: defaults.Sgids,
Home: defaults.Home,
}
// Sgids slice *cannot* be nil.
if user.Sgids == nil {
user.Sgids = []int{}
}
// allow for userArg to have either "user" syntax, or optionally "user:group" syntax
parseLine(userSpec, &userArg, &groupArg)
users, err := ParsePasswdFilter(passwd, func(u User) bool {
if userArg == "" {
return u.Uid == user.Uid
}
return u.Name == userArg || strconv.Itoa(u.Uid) == userArg
})
if err != nil && passwd != nil {
if userArg == "" {
userArg = strconv.Itoa(user.Uid)
}
return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err)
}
haveUser := users != nil && len(users) > 0
if haveUser {
// if we found any user entries that matched our filter, let's take the first one as "correct"
name = users[0].Name
user.Uid = users[0].Uid
user.Gid = users[0].Gid
user.Home = users[0].Home
} else if userArg != "" {
// we asked for a user but didn't find them... let's check to see if we wanted a numeric user
user.Uid, err = strconv.Atoi(userArg)
if err != nil {
// not numeric - we have to bail
return nil, fmt.Errorf("Unable to find user %v", userArg)
}
// Must be inside valid uid range.
if user.Uid < minId || user.Uid > maxId {
return nil, ErrRange
}
// if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit
}
if groupArg != "" || name != "" {
groups, err := ParseGroupFilter(group, func(g Group) bool {
// Explicit group format takes precedence.
if groupArg != "" {
return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg
}
// Check if user is a member.
for _, u := range g.List {
if u == name {
return true
}
}
return false
})
if err != nil && group != nil {
return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err)
}
haveGroup := groups != nil && len(groups) > 0
if groupArg != "" {
if haveGroup {
// if we found any group entries that matched our filter, let's take the first one as "correct"
user.Gid = groups[0].Gid
} else {
			// we asked for a group but didn't find it... let's check to see if we wanted a numeric group
user.Gid, err = strconv.Atoi(groupArg)
if err != nil {
// not numeric - we have to bail
return nil, fmt.Errorf("Unable to find group %v", groupArg)
}
// Ensure gid is inside gid range.
if user.Gid < minId || user.Gid > maxId {
return nil, ErrRange
}
// if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit
}
} else if haveGroup {
// If implicit group format, fill supplementary gids.
user.Sgids = make([]int, len(groups))
for i, group := range groups {
user.Sgids[i] = group.Gid
}
}
}
return user, nil
}
// GetAdditionalGroups looks up a list of groups by name or group id
// against the given /etc/group formatted data. If a group name cannot
// be found, an error will be returned. If a group id cannot be found,
// or the given group data is nil, the id will be returned as-is
// provided it is in the legal range.
func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
var groups = []Group{}
if group != nil {
var err error
groups, err = ParseGroupFilter(group, func(g Group) bool {
for _, ag := range additionalGroups {
if g.Name == ag || strconv.Itoa(g.Gid) == ag {
return true
}
}
return false
})
if err != nil {
return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
}
}
gidMap := make(map[int]struct{})
for _, ag := range additionalGroups {
var found bool
for _, g := range groups {
// if we found a matched group either by name or gid, take the
// first matched as correct
if g.Name == ag || strconv.Itoa(g.Gid) == ag {
if _, ok := gidMap[g.Gid]; !ok {
gidMap[g.Gid] = struct{}{}
found = true
break
}
}
}
// we asked for a group but didn't find it. let's check to see
// if we wanted a numeric group
if !found {
gid, err := strconv.Atoi(ag)
if err != nil {
return nil, fmt.Errorf("Unable to find group %s", ag)
}
// Ensure gid is inside gid range.
if gid < minId || gid > maxId {
return nil, ErrRange
}
gidMap[gid] = struct{}{}
}
}
gids := []int{}
for gid := range gidMap {
gids = append(gids, gid)
}
return gids, nil
}
// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
// that opens the groupPath given and gives it as an argument to
// GetAdditionalGroups.
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
group, err := os.Open(groupPath)
if err == nil {
defer group.Close()
}
return GetAdditionalGroups(additionalGroups, group)
}
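
The user.go removed above is the passwd/group parser plus the GetExecUser resolver that turns a "user[:group]" spec into uid/gid values. A sketch with in-memory data standing in for /etc/passwd and /etc/group; the entries are invented and the import path is assumed as above:

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Invented passwd/group data in the standard colon-separated formats.
	passwd := strings.NewReader("web:x:1000:1000:web:/home/web:/bin/sh\n")
	group := strings.NewReader("www:x:33:web\nweb:x:1000:\n")

	defaults := &user.ExecUser{Uid: 0, Gid: 0, Home: "/"}

	// "web:www" names an explicit group; "web" alone would keep the user's
	// primary gid and collect supplementary gids from group membership.
	execUser, err := user.GetExecUser("web:www", defaults, passwd, group)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d gid=%d home=%s sgids=%v\n",
		execUser.Uid, execUser.Gid, execUser.Home, execUser.Sgids)
	// Prints: uid=1000 gid=33 home=/home/web sgids=[]
}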

View File

@@ -1,19 +0,0 @@
Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -1,329 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tar implements access to tar archives.
// It aims to cover most of the variations, including those produced
// by GNU and BSD tars.
//
// References:
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
package tar
import (
"bytes"
"errors"
"fmt"
"os"
"path"
"time"
)
const (
blockSize = 512
// Types
TypeReg = '0' // regular file
TypeRegA = '\x00' // regular file
TypeLink = '1' // hard link
TypeSymlink = '2' // symbolic link
TypeChar = '3' // character device node
TypeBlock = '4' // block device node
TypeDir = '5' // directory
TypeFifo = '6' // fifo node
TypeCont = '7' // reserved
TypeXHeader = 'x' // extended header
TypeXGlobalHeader = 'g' // global extended header
TypeGNULongName = 'L' // Next file has a long name
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
TypeGNUSparse = 'S' // sparse file
)
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
type Header struct {
Name string // name of header file entry
Mode int64 // permission and mode bits
Uid int // user id of owner
Gid int // group id of owner
Size int64 // length in bytes
ModTime time.Time // modified time
Typeflag byte // type of header entry
Linkname string // target name of link
Uname string // user name of owner
Gname string // group name of owner
Devmajor int64 // major number of character or block device
Devminor int64 // minor number of character or block device
AccessTime time.Time // access time
ChangeTime time.Time // status change time
Xattrs map[string]string
}
// File name constants from the tar spec.
const (
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
)
// FileInfo returns an os.FileInfo for the Header.
func (h *Header) FileInfo() os.FileInfo {
return headerFileInfo{h}
}
// headerFileInfo implements os.FileInfo.
type headerFileInfo struct {
h *Header
}
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
func (fi headerFileInfo) Sys() interface{} { return fi.h }
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
if fi.IsDir() {
return path.Base(path.Clean(fi.h.Name))
}
return path.Base(fi.h.Name)
}
// Mode returns the permission and mode bits for the headerFileInfo.
func (fi headerFileInfo) Mode() (mode os.FileMode) {
// Set file permission bits.
mode = os.FileMode(fi.h.Mode).Perm()
// Set setuid, setgid and sticky bits.
if fi.h.Mode&c_ISUID != 0 {
// setuid
mode |= os.ModeSetuid
}
if fi.h.Mode&c_ISGID != 0 {
// setgid
mode |= os.ModeSetgid
}
if fi.h.Mode&c_ISVTX != 0 {
// sticky
mode |= os.ModeSticky
}
// Set file mode bits.
// clear perm, setuid, setgid and sticky bits.
m := os.FileMode(fi.h.Mode) &^ 07777
if m == c_ISDIR {
// directory
mode |= os.ModeDir
}
if m == c_ISFIFO {
// named pipe (FIFO)
mode |= os.ModeNamedPipe
}
if m == c_ISLNK {
// symbolic link
mode |= os.ModeSymlink
}
if m == c_ISBLK {
// device file
mode |= os.ModeDevice
}
if m == c_ISCHR {
// Unix character device
mode |= os.ModeDevice
mode |= os.ModeCharDevice
}
if m == c_ISSOCK {
// Unix domain socket
mode |= os.ModeSocket
}
switch fi.h.Typeflag {
case TypeSymlink:
// symbolic link
mode |= os.ModeSymlink
case TypeChar:
// character device node
mode |= os.ModeDevice
mode |= os.ModeCharDevice
case TypeBlock:
// block device node
mode |= os.ModeDevice
case TypeDir:
// directory
mode |= os.ModeDir
case TypeFifo:
// fifo node
mode |= os.ModeNamedPipe
}
return mode
}
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
// Mode constants from the tar spec.
const (
c_ISUID = 04000 // Set uid
c_ISGID = 02000 // Set gid
c_ISVTX = 01000 // Save text (sticky bit)
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
c_ISLNK = 0120000 // Symbolic link
c_ISBLK = 060000 // Block special file
c_ISCHR = 020000 // Character special file
c_ISSOCK = 0140000 // Socket
)
// Keywords for the PAX Extended Header
const (
paxAtime = "atime"
paxCharset = "charset"
paxComment = "comment"
paxCtime = "ctime" // please note that ctime is not a valid pax header.
paxGid = "gid"
paxGname = "gname"
paxLinkpath = "linkpath"
paxMtime = "mtime"
paxPath = "path"
paxSize = "size"
paxUid = "uid"
paxUname = "uname"
paxXattr = "SCHILY.xattr."
paxNone = ""
)
// FileInfoHeader creates a partially-populated Header from fi.
// If fi describes a symlink, FileInfoHeader records link as the link target.
// If fi describes a directory, a slash is appended to the name.
// Because os.FileInfo's Name method returns only the base name of
// the file it describes, it may be necessary to modify the Name field
// of the returned header to provide the full path name of the file.
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fi == nil {
return nil, errors.New("tar: FileInfo is nil")
}
fm := fi.Mode()
h := &Header{
Name: fi.Name(),
ModTime: fi.ModTime(),
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
}
switch {
case fm.IsRegular():
h.Mode |= c_ISREG
h.Typeflag = TypeReg
h.Size = fi.Size()
case fi.IsDir():
h.Typeflag = TypeDir
h.Mode |= c_ISDIR
h.Name += "/"
case fm&os.ModeSymlink != 0:
h.Typeflag = TypeSymlink
h.Mode |= c_ISLNK
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
h.Mode |= c_ISCHR
h.Typeflag = TypeChar
} else {
h.Mode |= c_ISBLK
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
h.Mode |= c_ISFIFO
case fm&os.ModeSocket != 0:
h.Mode |= c_ISSOCK
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
if fm&os.ModeSetuid != 0 {
h.Mode |= c_ISUID
}
if fm&os.ModeSetgid != 0 {
h.Mode |= c_ISGID
}
if fm&os.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
// If possible, populate additional fields from OS-specific
// FileInfo fields.
if sys, ok := fi.Sys().(*Header); ok {
// This FileInfo came from a Header (not the OS). Use the
// original Header to populate all remaining fields.
h.Uid = sys.Uid
h.Gid = sys.Gid
h.Uname = sys.Uname
h.Gname = sys.Gname
h.AccessTime = sys.AccessTime
h.ChangeTime = sys.ChangeTime
if sys.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range sys.Xattrs {
h.Xattrs[k] = v
}
}
if sys.Typeflag == TypeLink {
// hard link
h.Typeflag = TypeLink
h.Size = 0
h.Linkname = sys.Linkname
}
}
if sysStat != nil {
return h, sysStat(fi, h)
}
return h, nil
}
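// A minimal usage sketch for FileInfoHeader (the path "app.conf" and the
// destination name are hypothetical): build a Header from an os.Stat result
// and restore the full relative path, since only the base name is recorded.
//
//    fi, err := os.Stat("app.conf")
//    if err != nil {
//        return err
//    }
//    hdr, err := FileInfoHeader(fi, "")
//    if err != nil {
//        return err
//    }
//    hdr.Name = "etc/app.conf" // FileInfoHeader only records the base name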
var zeroBlock = make([]byte, blockSize)
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
// We compute and return both.
func checksum(header []byte) (unsigned int64, signed int64) {
for i := 0; i < len(header); i++ {
if i == 148 {
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
unsigned += ' ' * 8
signed += ' ' * 8
i += 7
continue
}
unsigned += int64(header[i])
signed += int64(int8(header[i]))
}
return
}
type slicer []byte
func (sp *slicer) next(n int) (b []byte) {
s := *sp
b, *sp = s[0:n], s[n:]
return
}
func isASCII(s string) bool {
for _, c := range s {
if c >= 0x80 {
return false
}
}
return true
}
func toASCII(s string) string {
if isASCII(s) {
return s
}
var buf bytes.Buffer
for _, c := range s {
if c < 0x80 {
buf.WriteByte(byte(c))
}
}
return buf.String()
}

View File

@@ -1,943 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - pax extensions
import (
"bytes"
"errors"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"time"
)
var (
ErrHeader = errors.New("archive/tar: invalid tar header")
)
const maxNanoSecondIntSize = 9
// A Reader provides sequential access to the contents of a tar archive.
// A tar archive consists of a sequence of files.
// The Next method advances to the next file in the archive (including the first),
// and then it can be treated as an io.Reader to access the file's data.
type Reader struct {
r io.Reader
err error
pad int64 // amount of padding (ignored) after current file entry
curr numBytesReader // reader for current file entry
hdrBuff [blockSize]byte // buffer to use in readHeader
RawAccounting bool // Whether to enable the access needed to reassemble the tar from raw bytes. Some performance/memory hit for this.
rawBytes *bytes.Buffer // last raw bits
}
// RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
// This includes the header and padding.
//
// This call resets the current rawbytes buffer.
//
// It only returns data when RawAccounting is enabled; otherwise it returns nil.
func (tr *Reader) RawBytes() []byte {
if !tr.RawAccounting {
return nil
}
if tr.rawBytes == nil {
tr.rawBytes = bytes.NewBuffer(nil)
}
// if we've read them, then flush them.
defer tr.rawBytes.Reset()
return tr.rawBytes.Bytes()
}
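// A minimal usage sketch for RawAccounting (r is a hypothetical io.Reader
// carrying a tar stream): walk the archive and collect the exact header and
// padding bytes for each entry.
//
//    tr := NewReader(r)
//    tr.RawAccounting = true
//    for {
//        hdr, err := tr.Next()
//        if err == io.EOF {
//            break
//        }
//        if err != nil {
//            return err
//        }
//        raw := tr.RawBytes() // raw header (plus any preceding padding) for hdr
//        fmt.Printf("%s: %d raw bytes\n", hdr.Name, len(raw))
//    }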
// A numBytesReader is an io.Reader with a numBytes method, returning the number
// of bytes remaining in the underlying encoded data.
type numBytesReader interface {
io.Reader
numBytes() int64
}
// A regFileReader is a numBytesReader for reading file data from a tar archive.
type regFileReader struct {
r io.Reader // underlying reader
nb int64 // number of unread bytes for current file entry
}
// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive.
type sparseFileReader struct {
rfr *regFileReader // reads the sparse-encoded file data
sp []sparseEntry // the sparse map for the file
pos int64 // keeps track of file position
tot int64 // total size of the file
}
// Keywords for GNU sparse files in a PAX extended header
const (
paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
paxGNUSparseOffset = "GNU.sparse.offset"
paxGNUSparseNumBytes = "GNU.sparse.numbytes"
paxGNUSparseMap = "GNU.sparse.map"
paxGNUSparseName = "GNU.sparse.name"
paxGNUSparseMajor = "GNU.sparse.major"
paxGNUSparseMinor = "GNU.sparse.minor"
paxGNUSparseSize = "GNU.sparse.size"
paxGNUSparseRealSize = "GNU.sparse.realsize"
)
// Keywords for old GNU sparse headers
const (
oldGNUSparseMainHeaderOffset = 386
oldGNUSparseMainHeaderIsExtendedOffset = 482
oldGNUSparseMainHeaderNumEntries = 4
oldGNUSparseExtendedHeaderIsExtendedOffset = 504
oldGNUSparseExtendedHeaderNumEntries = 21
oldGNUSparseOffsetSize = 12
oldGNUSparseNumBytesSize = 12
)
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
// Next advances to the next entry in the tar archive.
//
// io.EOF is returned at the end of the input.
func (tr *Reader) Next() (*Header, error) {
var hdr *Header
if tr.RawAccounting {
if tr.rawBytes == nil {
tr.rawBytes = bytes.NewBuffer(nil)
} else {
tr.rawBytes.Reset()
}
}
if tr.err == nil {
tr.skipUnread()
}
if tr.err != nil {
return hdr, tr.err
}
hdr = tr.readHeader()
if hdr == nil {
return hdr, tr.err
}
// Check for PAX/GNU header.
switch hdr.Typeflag {
case TypeXHeader:
// PAX extended header
headers, err := parsePAX(tr)
if err != nil {
return nil, err
}
// We actually read the whole file,
// but this skips alignment padding
tr.skipUnread()
if tr.err != nil {
return nil, tr.err
}
hdr = tr.readHeader()
if hdr == nil {
return nil, tr.err
}
mergePAX(hdr, headers)
// Check for a PAX format sparse file
sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers)
if err != nil {
tr.err = err
return nil, err
}
if sp != nil {
// Current file is a PAX format GNU sparse file.
// Set the current file reader to a sparse file reader.
tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
}
return hdr, nil
case TypeGNULongName:
// We have a GNU long name header. Its contents are the real file name.
realname, err := ioutil.ReadAll(tr)
if err != nil {
return nil, err
}
var buf []byte
if tr.RawAccounting {
if _, err = tr.rawBytes.Write(realname); err != nil {
return nil, err
}
buf = make([]byte, tr.rawBytes.Len())
copy(buf[:], tr.RawBytes())
}
hdr, err := tr.Next()
// since the above call to Next() resets the buffer, we need to throw the bytes over
if tr.RawAccounting {
buf = append(buf, tr.RawBytes()...)
if _, err = tr.rawBytes.Write(buf); err != nil {
return nil, err
}
}
hdr.Name = cString(realname)
return hdr, err
case TypeGNULongLink:
// We have a GNU long link header.
realname, err := ioutil.ReadAll(tr)
if err != nil {
return nil, err
}
var buf []byte
if tr.RawAccounting {
if _, err = tr.rawBytes.Write(realname); err != nil {
return nil, err
}
buf = make([]byte, tr.rawBytes.Len())
copy(buf[:], tr.RawBytes())
}
hdr, err := tr.Next()
// since the above call to Next() resets the buffer, we need to throw the bytes over
if tr.RawAccounting {
buf = append(buf, tr.RawBytes()...)
if _, err = tr.rawBytes.Write(buf); err != nil {
return nil, err
}
}
hdr.Linkname = cString(realname)
return hdr, err
}
return hdr, tr.err
}
// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
// be treated as a regular file.
func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
var sparseFormat string
// Check for sparse format indicators
major, majorOk := headers[paxGNUSparseMajor]
minor, minorOk := headers[paxGNUSparseMinor]
sparseName, sparseNameOk := headers[paxGNUSparseName]
_, sparseMapOk := headers[paxGNUSparseMap]
sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
// Identify which, if any, sparse format applies from which PAX headers are set
if majorOk && minorOk {
sparseFormat = major + "." + minor
} else if sparseNameOk && sparseMapOk {
sparseFormat = "0.1"
} else if sparseSizeOk {
sparseFormat = "0.0"
} else {
// Not a PAX format GNU sparse file.
return nil, nil
}
// Check for unknown sparse format
if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
return nil, nil
}
// Update hdr from GNU sparse PAX headers
if sparseNameOk {
hdr.Name = sparseName
}
if sparseSizeOk {
realSize, err := strconv.ParseInt(sparseSize, 10, 0)
if err != nil {
return nil, ErrHeader
}
hdr.Size = realSize
} else if sparseRealSizeOk {
realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
if err != nil {
return nil, ErrHeader
}
hdr.Size = realSize
}
// Set up the sparse map, according to the particular sparse format in use
var sp []sparseEntry
var err error
switch sparseFormat {
case "0.0", "0.1":
sp, err = readGNUSparseMap0x1(headers)
case "1.0":
sp, err = readGNUSparseMap1x0(tr.curr)
}
return sp, err
}
// mergePAX merges well-known headers according to the PAX standard.
// Values from the extended headers overwrite the corresponding fields in the
// Header struct, since they carry higher precision or longer values.
// This is especially useful for the name and linkname fields.
func mergePAX(hdr *Header, headers map[string]string) error {
for k, v := range headers {
switch k {
case paxPath:
hdr.Name = v
case paxLinkpath:
hdr.Linkname = v
case paxGname:
hdr.Gname = v
case paxUname:
hdr.Uname = v
case paxUid:
uid, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Uid = int(uid)
case paxGid:
gid, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Gid = int(gid)
case paxAtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.AccessTime = t
case paxMtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.ModTime = t
case paxCtime:
t, err := parsePAXTime(v)
if err != nil {
return err
}
hdr.ChangeTime = t
case paxSize:
size, err := strconv.ParseInt(v, 10, 0)
if err != nil {
return err
}
hdr.Size = int64(size)
default:
if strings.HasPrefix(k, paxXattr) {
if hdr.Xattrs == nil {
hdr.Xattrs = make(map[string]string)
}
hdr.Xattrs[k[len(paxXattr):]] = v
}
}
}
return nil
}
// parsePAXTime takes a string of the form %d.%d as described in
// the PAX specification.
func parsePAXTime(t string) (time.Time, error) {
buf := []byte(t)
pos := bytes.IndexByte(buf, '.')
var seconds, nanoseconds int64
var err error
if pos == -1 {
seconds, err = strconv.ParseInt(t, 10, 0)
if err != nil {
return time.Time{}, err
}
} else {
seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
if err != nil {
return time.Time{}, err
}
nano_buf := string(buf[pos+1:])
// Pad as needed before converting to a decimal.
// For example .030 -> .030000000 -> 30000000 nanoseconds
if len(nano_buf) < maxNanoSecondIntSize {
// Right pad
nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
} else if len(nano_buf) > maxNanoSecondIntSize {
// Right truncate
nano_buf = nano_buf[:maxNanoSecondIntSize]
}
nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
if err != nil {
return time.Time{}, err
}
}
ts := time.Unix(seconds, nanoseconds)
return ts, nil
}
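// For example (values chosen purely for illustration),
// parsePAXTime("1350244992.023960108") yields time.Unix(1350244992, 23960108).
// A fractional part shorter than nine digits is right-padded with zeros
// ("1350244992.3" becomes 300000000 nanoseconds), and a longer one is
// truncated to nine digits.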
// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned
func parsePAX(r io.Reader) (map[string]string, error) {
buf, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
// leaving this function for io.Reader makes it more testable
if tr, ok := r.(*Reader); ok && tr.RawAccounting {
if _, err = tr.rawBytes.Write(buf); err != nil {
return nil, err
}
}
// For GNU PAX sparse format 0.0 support.
// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
var sparseMap bytes.Buffer
headers := make(map[string]string)
// Each record is constructed as
// "%d %s=%s\n", length, keyword, value
for len(buf) > 0 {
// or the header was empty to start with.
var sp int
// The size field ends at the first space.
sp = bytes.IndexByte(buf, ' ')
if sp == -1 {
return nil, ErrHeader
}
// Parse the first token as a decimal integer.
n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
if err != nil || n < 5 || int64(len(buf)) < n {
return nil, ErrHeader
}
// Extract everything between the length field and the record's trailing
// newline: +1 at the start to eat the ' ' after the length, -1 at the end
// to drop the '\n'.
var record []byte
record, buf = buf[sp+1:n-1], buf[n:]
// The first equals is guaranteed to mark the end of the key.
// Everything else is value.
eq := bytes.IndexByte(record, '=')
if eq == -1 {
return nil, ErrHeader
}
key, value := record[:eq], record[eq+1:]
keyStr := string(key)
if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
sparseMap.Write(value)
sparseMap.Write([]byte{','})
} else {
// Normal key. Set the value in the headers map.
headers[keyStr] = string(value)
}
}
if sparseMap.Len() != 0 {
// Add sparse info to headers, chopping off the extra comma
sparseMap.Truncate(sparseMap.Len() - 1)
headers[paxGNUSparseMap] = sparseMap.String()
}
return headers, nil
}
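// A sketch of the record format parsePAX consumes: each record is
// "%d %s=%s\n", where the leading decimal is the length of the entire record,
// including itself, the separating space, and the trailing newline. With
// hypothetical values, a payload of
//
//    30 atime=1350244992.023960108\n
//    21 path=etc/app.conf\n
//
// yields headers["atime"] = "1350244992.023960108" and
// headers["path"] = "etc/app.conf".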
// cString parses bytes as a NUL-terminated C-style string.
// If a NUL byte is not found then the whole slice is returned as a string.
func cString(b []byte) string {
n := 0
for n < len(b) && b[n] != 0 {
n++
}
return string(b[0:n])
}
func (tr *Reader) octal(b []byte) int64 {
// Check for binary format first.
if len(b) > 0 && b[0]&0x80 != 0 {
var x int64
for i, c := range b {
if i == 0 {
c &= 0x7f // ignore the high-order bit that marks the binary format
}
x = x<<8 | int64(c)
}
return x
}
// Because unused fields are filled with NULs, we need
// to skip leading NULs. Fields may also be padded with
// spaces or NULs.
// So we remove leading and trailing NULs and spaces to
// be sure.
b = bytes.Trim(b, " \x00")
if len(b) == 0 {
return 0
}
x, err := strconv.ParseUint(cString(b), 8, 64)
if err != nil {
tr.err = err
}
return int64(x)
}
// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
func (tr *Reader) skipUnread() {
nr := tr.numBytes() + tr.pad // number of bytes to skip
tr.curr, tr.pad = nil, 0
if tr.RawAccounting {
_, tr.err = io.CopyN(tr.rawBytes, tr.r, nr)
return
}
if sr, ok := tr.r.(io.Seeker); ok {
if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
return
}
}
_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
}
func (tr *Reader) verifyChecksum(header []byte) bool {
if tr.err != nil {
return false
}
given := tr.octal(header[148:156])
unsigned, signed := checksum(header)
return given == unsigned || given == signed
}
func (tr *Reader) readHeader() *Header {
header := tr.hdrBuff[:]
copy(header, zeroBlock)
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
// because it could read some of the block, but reach EOF first
if tr.err == io.EOF && tr.RawAccounting {
if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
return nil
}
}
return nil
}
if tr.RawAccounting {
if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
return nil
}
}
// Two blocks of zero bytes marks the end of the archive.
if bytes.Equal(header, zeroBlock[0:blockSize]) {
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
// because it could read some of the block, but reach EOF first
if tr.err == io.EOF && tr.RawAccounting {
if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
return nil
}
}
return nil
}
if tr.RawAccounting {
if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
return nil
}
}
if bytes.Equal(header, zeroBlock[0:blockSize]) {
tr.err = io.EOF
} else {
tr.err = ErrHeader // zero block and then non-zero block
}
return nil
}
if !tr.verifyChecksum(header) {
tr.err = ErrHeader
return nil
}
// Unpack
hdr := new(Header)
s := slicer(header)
hdr.Name = cString(s.next(100))
hdr.Mode = tr.octal(s.next(8))
hdr.Uid = int(tr.octal(s.next(8)))
hdr.Gid = int(tr.octal(s.next(8)))
hdr.Size = tr.octal(s.next(12))
if hdr.Size < 0 {
tr.err = ErrHeader
return nil
}
hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
s.next(8) // chksum
hdr.Typeflag = s.next(1)[0]
hdr.Linkname = cString(s.next(100))
// The remainder of the header depends on the value of magic.
// The original (v7) version of tar had no explicit magic field,
// so its magic bytes, like the rest of the block, are NULs.
magic := string(s.next(8)) // contains version field as well.
var format string
switch {
case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
if string(header[508:512]) == "tar\x00" {
format = "star"
} else {
format = "posix"
}
case magic == "ustar \x00": // old GNU tar
format = "gnu"
}
switch format {
case "posix", "gnu", "star":
hdr.Uname = cString(s.next(32))
hdr.Gname = cString(s.next(32))
devmajor := s.next(8)
devminor := s.next(8)
if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
hdr.Devmajor = tr.octal(devmajor)
hdr.Devminor = tr.octal(devminor)
}
var prefix string
switch format {
case "posix", "gnu":
prefix = cString(s.next(155))
case "star":
prefix = cString(s.next(131))
hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0)
hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0)
}
if len(prefix) > 0 {
hdr.Name = prefix + "/" + hdr.Name
}
}
if tr.err != nil {
tr.err = ErrHeader
return nil
}
// Maximum value of hdr.Size is 64 GB (12 octal digits),
// so there's no risk of int64 overflowing.
nb := int64(hdr.Size)
tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
// Set the current file reader.
tr.curr = &regFileReader{r: tr.r, nb: nb}
// Check for old GNU sparse format entry.
if hdr.Typeflag == TypeGNUSparse {
// Get the real size of the file.
hdr.Size = tr.octal(header[483:495])
// Read the sparse map.
sp := tr.readOldGNUSparseMap(header)
if tr.err != nil {
return nil
}
// Current file is a GNU sparse file. Update the current file reader.
tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size}
}
return hdr
}
// A sparseEntry holds a single entry in a sparse file's sparse map.
// A sparse entry indicates the offset and size in a sparse file of a
// block of data.
type sparseEntry struct {
offset int64
numBytes int64
}
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
spCap := oldGNUSparseMainHeaderNumEntries
if isExtended {
spCap += oldGNUSparseExtendedHeaderNumEntries
}
sp := make([]sparseEntry, 0, spCap)
s := slicer(header[oldGNUSparseMainHeaderOffset:])
// Read the four entries from the main tar header
for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
offset := tr.octal(s.next(oldGNUSparseOffsetSize))
numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
if tr.err != nil {
tr.err = ErrHeader
return nil
}
if offset == 0 && numBytes == 0 {
break
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
for isExtended {
// There are more entries. Read an extension header and parse its entries.
sparseHeader := make([]byte, blockSize)
if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
return nil
}
if tr.RawAccounting {
if _, tr.err = tr.rawBytes.Write(sparseHeader); tr.err != nil {
return nil
}
}
isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
s = slicer(sparseHeader)
for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
offset := tr.octal(s.next(oldGNUSparseOffsetSize))
numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
if tr.err != nil {
tr.err = ErrHeader
return nil
}
if offset == 0 && numBytes == 0 {
break
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
}
return sp
}
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
// The sparse map is stored just before the file data and padded out to the nearest block boundary.
func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
buf := make([]byte, 2*blockSize)
sparseHeader := buf[:blockSize]
// readDecimal is a helper function to read a decimal integer from the sparse map
// while making sure to read from the file in blocks of size blockSize
readDecimal := func() (int64, error) {
// Look for newline
nl := bytes.IndexByte(sparseHeader, '\n')
if nl == -1 {
if len(sparseHeader) >= blockSize {
// This is an error
return 0, ErrHeader
}
oldLen := len(sparseHeader)
newLen := oldLen + blockSize
if cap(sparseHeader) < newLen {
// There's more header, but we need to make room for the next block
copy(buf, sparseHeader)
sparseHeader = buf[:newLen]
} else {
// There's more header, and we can just reslice
sparseHeader = sparseHeader[:newLen]
}
// Now that sparseHeader is large enough, read next block
if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
return 0, err
}
// leaving this function for io.Reader makes it more testable
if tr, ok := r.(*Reader); ok && tr.RawAccounting {
if _, err := tr.rawBytes.Write(sparseHeader[oldLen:newLen]); err != nil {
return 0, err
}
}
// Look for a newline in the new data
nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
if nl == -1 {
// This is an error
return 0, ErrHeader
}
nl += oldLen // We want the position from the beginning
}
// Now that we've found a newline, read a number
n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
if err != nil {
return 0, ErrHeader
}
// Update sparseHeader to consume this number
sparseHeader = sparseHeader[nl+1:]
return n, nil
}
// Read the first block
if _, err := io.ReadFull(r, sparseHeader); err != nil {
return nil, err
}
// leaving this function for io.Reader makes it more testable
if tr, ok := r.(*Reader); ok && tr.RawAccounting {
if _, err := tr.rawBytes.Write(sparseHeader); err != nil {
return nil, err
}
}
// The first line contains the number of entries
numEntries, err := readDecimal()
if err != nil {
return nil, err
}
// Read all the entries
sp := make([]sparseEntry, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
// Read the offset
offset, err := readDecimal()
if err != nil {
return nil, err
}
// Read numBytes
numBytes, err := readDecimal()
if err != nil {
return nil, err
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
return sp, nil
}
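// The format 1.0 map read above is a series of newline-terminated decimal
// numbers stored just before the file data and padded out to a block
// boundary. With hypothetical values, a map describing two data fragments
// looks like:
//
//    2\n      number of entries
//    0\n      entry 1: offset
//    512\n    entry 1: numBytes
//    4096\n   entry 2: offset
//    512\n    entry 2: numBytes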
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
// The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
// Get number of entries
numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
if !ok {
return nil, ErrHeader
}
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
if err != nil {
return nil, ErrHeader
}
sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
// There should be two numbers in sparseMap for each entry
if int64(len(sparseMap)) != 2*numEntries {
return nil, ErrHeader
}
// Loop through the entries in the sparse map
sp := make([]sparseEntry, 0, numEntries)
for i := int64(0); i < numEntries; i++ {
offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
if err != nil {
return nil, ErrHeader
}
numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
if err != nil {
return nil, ErrHeader
}
sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
}
return sp, nil
}
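// In format 0.1 the same information arrives as PAX records. With
// hypothetical values,
//
//    GNU.sparse.numblocks=2
//    GNU.sparse.map=0,512,4096,512
//
// is parsed into two entries: {offset: 0, numBytes: 512} and
// {offset: 4096, numBytes: 512}.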
// numBytes returns the number of bytes left to read in the current file's entry
// in the tar archive, or 0 if there is no current file.
func (tr *Reader) numBytes() int64 {
if tr.curr == nil {
// No current file, so no bytes
return 0
}
return tr.curr.numBytes()
}
// Read reads from the current entry in the tar archive.
// It returns 0, io.EOF when it reaches the end of that entry,
// until Next is called to advance to the next entry.
func (tr *Reader) Read(b []byte) (n int, err error) {
if tr.curr == nil {
return 0, io.EOF
}
n, err = tr.curr.Read(b)
if err != nil && err != io.EOF {
tr.err = err
}
return
}
func (rfr *regFileReader) Read(b []byte) (n int, err error) {
if rfr.nb == 0 {
// file consumed
return 0, io.EOF
}
if int64(len(b)) > rfr.nb {
b = b[0:rfr.nb]
}
n, err = rfr.r.Read(b)
rfr.nb -= int64(n)
if err == io.EOF && rfr.nb > 0 {
err = io.ErrUnexpectedEOF
}
return
}
// numBytes returns the number of bytes left to read in the file's data in the tar archive.
func (rfr *regFileReader) numBytes() int64 {
return rfr.nb
}
// readHole reads a sparse file hole ending at offset toOffset
func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
n64 := toOffset - sfr.pos
if n64 > int64(len(b)) {
n64 = int64(len(b))
}
n := int(n64)
for i := 0; i < n; i++ {
b[i] = 0
}
sfr.pos += n64
return n
}
// Read reads the sparse file data in expanded form.
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
if len(sfr.sp) == 0 {
// No more data fragments to read from.
if sfr.pos < sfr.tot {
// We're in the last hole
n = sfr.readHole(b, sfr.tot)
return
}
// Otherwise, we're at the end of the file
return 0, io.EOF
}
if sfr.tot < sfr.sp[0].offset {
return 0, io.ErrUnexpectedEOF
}
if sfr.pos < sfr.sp[0].offset {
// We're in a hole
n = sfr.readHole(b, sfr.sp[0].offset)
return
}
// We're not in a hole, so we'll read from the next data fragment
posInFragment := sfr.pos - sfr.sp[0].offset
bytesLeft := sfr.sp[0].numBytes - posInFragment
if int64(len(b)) > bytesLeft {
b = b[0:bytesLeft]
}
n, err = sfr.rfr.Read(b)
sfr.pos += int64(n)
if int64(n) == bytesLeft {
// We're done with this fragment
sfr.sp = sfr.sp[1:]
}
if err == io.EOF && sfr.pos < sfr.tot {
// We reached the end of the last fragment's data, but there's a final hole
err = nil
}
return
}
// numBytes returns the number of bytes left to read in the sparse file's
// sparse-encoded data in the tar archive.
func (sfr *sparseFileReader) numBytes() int64 {
return sfr.rfr.nb
}

View File

@@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux dragonfly openbsd solaris
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atim.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctim.Unix())
}

View File

@@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd netbsd
package tar
import (
"syscall"
"time"
)
func statAtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Atimespec.Unix())
}
func statCtime(st *syscall.Stat_t) time.Time {
return time.Unix(st.Ctimespec.Unix())
}

View File

@@ -1,32 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
package tar
import (
"os"
"syscall"
)
func init() {
sysStat = statUnix
}
func statUnix(fi os.FileInfo, h *Header) error {
sys, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
h.Uid = int(sys.Uid)
h.Gid = int(sys.Gid)
// TODO(bradfitz): populate username & group. os/user
// doesn't cache LookupId lookups, and lacks group
// lookup functions.
h.AccessTime = statAtime(sys)
h.ChangeTime = statCtime(sys)
// TODO(bradfitz): major/minor device numbers?
return nil
}

View File

@@ -1,396 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
// TODO(dsymonds):
// - catch more errors (no first header, etc.)
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path"
"strconv"
"strings"
"time"
)
var (
ErrWriteTooLong = errors.New("archive/tar: write too long")
ErrFieldTooLong = errors.New("archive/tar: header field too long")
ErrWriteAfterClose = errors.New("archive/tar: write after close")
errNameTooLong = errors.New("archive/tar: name too long")
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
)
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
w io.Writer
err error
nb int64 // number of unwritten bytes for current file entry
pad int64 // amount of padding to write after current file entry
closed bool
usedBinary bool // whether the binary numeric field extension was used
preferPax bool // use pax header instead of binary numeric header
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
// NewWriter creates a new Writer writing to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
// Flush finishes writing the current file (optional).
func (tw *Writer) Flush() error {
if tw.nb > 0 {
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
return tw.err
}
n := tw.nb + tw.pad
for n > 0 && tw.err == nil {
nr := n
if nr > blockSize {
nr = blockSize
}
var nw int
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
n -= int64(nw)
}
tw.nb = 0
tw.pad = 0
return tw.err
}
// cString writes s into b, terminating it with a NUL if there is room.
// If the value is too long for the field and allowPax is true, a PAX header
// record is added instead.
func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
return
}
if len(s) > len(b) {
if tw.err == nil {
tw.err = ErrFieldTooLong
}
return
}
ascii := toASCII(s)
copy(b, ascii)
if len(ascii) < len(b) {
b[len(ascii)] = 0
}
}
// Encode x as an octal ASCII string and write it into b with leading zeros.
func (tw *Writer) octal(b []byte, x int64) {
s := strconv.FormatInt(x, 8)
// leading zeros, but leave room for a NUL.
for len(s)+1 < len(b) {
s = "0" + s
}
tw.cString(b, s, false, paxNone, nil)
}
// numeric writes x into b, either as octal or as binary (GNU tar/star extension).
// If the value is too long for the field and PAX is both allowed for the field
// and preferred by the writer, a PAX header record is added instead.
func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
// Try octal first.
s := strconv.FormatInt(x, 8)
if len(s) < len(b) {
tw.octal(b, x)
return
}
// If it is too long for octal, and pax is preferred, use a pax header
if allowPax && tw.preferPax {
tw.octal(b, 0)
s := strconv.FormatInt(x, 10)
paxHeaders[paxKeyword] = s
return
}
// Too big: use binary (big-endian).
tw.usedBinary = true
for i := len(b) - 1; x > 0 && i >= 0; i-- {
b[i] = byte(x)
x >>= 8
}
b[0] |= 0x80 // highest bit indicates binary format
}
var (
minTime = time.Unix(0, 0)
// There is room for 11 octal digits (33 bits) of mtime.
maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
return tw.writeHeader(hdr, true)
}
// writeHeader writes hdr and prepares to accept the file's contents.
// It calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
// allowPax is false when this method is called internally by writePAXHeader,
// so that writing the PAX header itself does not trigger another one.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
if tw.closed {
return ErrWriteAfterClose
}
if tw.err == nil {
tw.Flush()
}
if tw.err != nil {
return tw.err
}
// a map to hold pax header records, if any are needed
paxHeaders := make(map[string]string)
// TODO(shanemhansen): we might want to use PAX headers for
// subsecond time resolution, but for now let's just capture
// too long fields or non ascii characters
var header []byte
// We need to select which scratch buffer to use carefully,
// since this method is called recursively to write PAX headers.
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
// already being used by the non-recursive call, so we must use paxHdrBuff.
header = tw.hdrBuff[:]
if !allowPax {
header = tw.paxHdrBuff[:]
}
copy(header, zeroBlock)
s := slicer(header)
// keep a reference to the filename so we can overwrite it later if we detect that we can use ustar long names instead of pax
pathHeaderBytes := s.next(fileNameSize)
tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
// Handle out of range ModTime carefully.
var modTime int64
if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
modTime = hdr.ModTime.Unix()
}
tw.octal(s.next(8), hdr.Mode) // 100:108
tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders) // 124:136
tw.numeric(s.next(12), modTime, false, paxNone, nil) // 136:148 --- consider using pax for finer granularity
s.next(8) // chksum (148:156)
s.next(1)[0] = hdr.Typeflag // 156:157
tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
copy(s.next(8), []byte("ustar\x0000")) // 257:265
tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil) // 329:337
tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil) // 337:345
// keep a reference to the prefix so we can overwrite it later if we detect that we can use ustar long names instead of pax
prefixHeaderBytes := s.next(155)
tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
if tw.usedBinary {
copy(header[257:265], []byte("ustar \x00"))
}
_, paxPathUsed := paxHeaders[paxPath]
// try to use a ustar header when only the name is too long
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
suffix := hdr.Name
prefix := ""
if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
var err error
prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
if err == nil {
// ok we can use a ustar long name instead of pax, now correct the fields
// remove the path field from the pax header. this will suppress the pax header
delete(paxHeaders, paxPath)
// update the path fields
tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
// Use the ustar magic if we used ustar long names.
if len(prefix) > 0 && !tw.usedBinary {
copy(header[257:265], []byte("ustar\x00"))
}
}
}
}
// The chksum field is terminated by a NUL and a space.
// This is different from the other octal fields.
chksum, _ := checksum(header)
tw.octal(header[148:155], chksum)
header[155] = ' '
if tw.err != nil {
// problem with header; probably integer too big for a field.
return tw.err
}
if allowPax {
for k, v := range hdr.Xattrs {
paxHeaders[paxXattr+k] = v
}
}
if len(paxHeaders) > 0 {
if !allowPax {
return errInvalidHeader
}
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
return err
}
}
tw.nb = int64(hdr.Size)
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
_, tw.err = tw.w.Write(header)
return tw.err
}
// splitUSTARLongName splits a USTAR long name into prefix and suffix fields.
// name must be less than 256 characters. errNameTooLong is returned
// if the name can't be split. The splitting heuristic
// is compatible with GNU tar.
func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
length := len(name)
if length > fileNamePrefixSize+1 {
length = fileNamePrefixSize + 1
} else if name[length-1] == '/' {
length--
}
i := strings.LastIndex(name[:length], "/")
// nlen contains the resulting length in the name field.
// plen contains the resulting length in the prefix field.
nlen := len(name) - i - 1
plen := i
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
err = errNameTooLong
return
}
prefix, suffix = name[:i], name[i+1:]
return
}
// writePAXHeader writes an extended PAX header to the archive.
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
// Prepare extended header
ext := new(Header)
ext.Typeflag = TypeXHeader
// Setting ModTime is required for reader parsing to
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid.
pid := os.Getpid()
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir,
fmt.Sprintf("PaxHeaders.%d", pid), file)
ascii := toASCII(fullName)
if len(ascii) > 100 {
ascii = ascii[:100]
}
ext.Name = ascii
// Construct the body
var buf bytes.Buffer
for k, v := range paxHeaders {
fmt.Fprint(&buf, paxHeader(k+"="+v))
}
ext.Size = int64(len(buf.Bytes()))
if err := tw.writeHeader(ext, false); err != nil {
return err
}
if _, err := tw.Write(buf.Bytes()); err != nil {
return err
}
if err := tw.Flush(); err != nil {
return err
}
return nil
}
// paxHeader formats a single pax record, prefixing it with the appropriate length
func paxHeader(msg string) string {
const padding = 2 // Extra padding for space and newline
size := len(msg) + padding
size += len(strconv.Itoa(size))
record := fmt.Sprintf("%d %s\n", size, msg)
if len(record) != size {
// Final adjustment if adding size increased
// the number of digits in size
size = len(record)
record = fmt.Sprintf("%d %s\n", size, msg)
}
return record
}
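// Worked example: for msg = "mtime=1350244992.023960108" (26 bytes),
// paxHeader starts from 26+2 = 28, adds len("28") = 2 to get 30, and returns
// "30 mtime=1350244992.023960108\n", which is exactly 30 bytes long. The
// second Sprintf only runs when prefixing the length pushes the size into an
// extra digit.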
// Write writes to the current entry in the tar archive.
// Write returns the error ErrWriteTooLong if more than
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
if tw.closed {
err = ErrWriteAfterClose
return
}
overwrite := false
if int64(len(b)) > tw.nb {
b = b[0:tw.nb]
overwrite = true
}
n, err = tw.w.Write(b)
tw.nb -= int64(n)
if err == nil && overwrite {
err = ErrWriteTooLong
return
}
tw.err = err
return
}
// Close closes the tar archive, flushing any unwritten
// data to the underlying writer.
func (tw *Writer) Close() error {
if tw.err != nil || tw.closed {
return tw.err
}
tw.Flush()
tw.closed = true
if tw.err != nil {
return tw.err
}
// trailer: two zero blocks
for i := 0; i < 2; i++ {
_, tw.err = tw.w.Write(zeroBlock)
if tw.err != nil {
break
}
}
return tw.err
}
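// A minimal usage sketch for the Writer (file name and contents are
// hypothetical): write a single regular file into an in-memory archive.
//
//    var buf bytes.Buffer
//    tw := NewWriter(&buf)
//    body := []byte("hello world\n")
//    hdr := &Header{
//        Name:     "hello.txt",
//        Mode:     0644,
//        Size:     int64(len(body)),
//        ModTime:  time.Now(),
//        Typeflag: TypeReg,
//    }
//    if err := tw.WriteHeader(hdr); err != nil {
//        return err
//    }
//    if _, err := tw.Write(body); err != nil {
//        return err
//    }
//    if err := tw.Close(); err != nil {
//        return err
//    }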

View File

@@ -1,44 +0,0 @@
asm
===
This library is for assembly and disassembly of tar archives, facilitated by
`github.com/vbatts/tar-split/tar/storage`.
Concerns
--------
For completely safe assembly/disassembly, there will need to be a Content
Addressable Storage (CAS) directory that maps to a checksum in the
`storage.Entry` of `storage.FileType`.
This is because tar archives _can_ contain multiple records for the same path,
and the last one effectively wins, even if the prior records had a different
payload.
As a result, when assembling an archive from relative paths alone, every record
for a given path would be read back with an identical payload.
Thoughts
--------
Have a look-aside directory or storage. This way when a clobbering record is
encountered from the tar stream, then the payload of the prior/existing file is
stored to the CAS. This way the clobbering record's file payload can be
extracted, but we'll have preserved the payload needed to reassemble a precise
tar archive.
clobbered/path/to/file.[0-N]
*alternatively*
We could just _not_ support tar streams that have clobbering file paths.
Appending records to an archive is not especially common, and doesn't happen
by default for most implementations. Not supporting them wouldn't be a
security concern either: if it did occur, we would reassemble an archive that
fails signature/checksum validation, so it shouldn't be trusted anyway.
This would let us defer support for appended files as a FUTURE FEATURE.

View File

@@ -1,130 +0,0 @@
package asm
import (
"bytes"
"fmt"
"hash"
"hash/crc64"
"io"
"sync"
"github.com/vbatts/tar-split/tar/storage"
)
// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
// stream.
//
// It takes a storage.FileGetter, for mapping the file payloads that are to be read in,
// and a storage.Unpacker, which has access to the rawbytes and file order
// metadata. With the combination of these two items, a precise assembled Tar
// archive is possible.
func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {
// ... Since these are interfaces, this is possible, so let's not have a nil pointer
if fg == nil || up == nil {
return nil
}
pr, pw := io.Pipe()
go func() {
err := WriteOutputTarStream(fg, up, pw)
if err != nil {
pw.CloseWithError(err)
} else {
pw.Close()
}
}()
return pr
}
// WriteOutputTarStream writes assembled tar archive to a writer.
func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer) error {
// ... Since these are interfaces, this is possible, so let's not have a nil pointer
if fg == nil || up == nil {
return nil
}
var copyBuffer []byte
var crcHash hash.Hash
var crcSum []byte
var multiWriter io.Writer
for {
entry, err := up.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
switch entry.Type {
case storage.SegmentType:
if _, err := w.Write(entry.Payload); err != nil {
return err
}
case storage.FileType:
if entry.Size == 0 {
continue
}
fh, err := fg.Get(entry.GetName())
if err != nil {
return err
}
if crcHash == nil {
crcHash = crc64.New(storage.CRCTable)
crcSum = make([]byte, 8)
multiWriter = io.MultiWriter(w, crcHash)
copyBuffer = byteBufferPool.Get().([]byte)
defer byteBufferPool.Put(copyBuffer)
} else {
crcHash.Reset()
}
if _, err := copyWithBuffer(multiWriter, fh, copyBuffer); err != nil {
fh.Close()
return err
}
if !bytes.Equal(crcHash.Sum(crcSum[:0]), entry.Payload) {
// I would rather this be a comparable ErrInvalidChecksum or such,
// but since it's coming through the PipeReader, the context of
// _which_ file would be lost...
fh.Close()
return fmt.Errorf("file integrity checksum failed for %q", entry.GetName())
}
fh.Close()
}
}
}
var byteBufferPool = &sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
// copyWithBuffer is taken from stdlib io.Copy implementation
// https://github.com/golang/go/blob/go1.5.1/src/io/io.go#L367
func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
for {
nr, er := src.Read(buf)
if nr > 0 {
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er == io.EOF {
break
}
if er != nil {
err = er
break
}
}
return written, err
}

View File

@@ -1,141 +0,0 @@
package asm
import (
"io"
"io/ioutil"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/storage"
)
// NewInputTarStream wraps the Reader stream of a tar archive and provides a
// Reader stream of the same.
//
// In the middle it will pack the segments and file metadata to storage.Packer
// `p`.
//
// The storage.FilePutter is where the payloads of files in the stream are
// stashed. If this stashing is not needed, you can provide a nil
// storage.FilePutter; since the checksumming is still needed, a default
// NewDiscardFilePutter will be used internally.
func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) {
// What to do here... folks will want their own access to the Reader that is
// their tar archive stream, but we'll need that same stream to use our
// forked 'archive/tar'.
// Perhaps do an io.TeeReader that hands back an io.Reader for them to read
// from, and we'll MITM the stream to store metadata.
// We'll need a storage.FilePutter too ...
// Another concern, whether to do any storage.FilePutter operations, such that we
// don't extract any amount of the archive. But then again, we're not making
// files/directories, hardlinks, etc. Just writing the io to the storage.FilePutter.
// Perhaps we have a DiscardFilePutter that is a bit bucket.
// we'll return the pipe reader, since TeeReader does not buffer and will
// only read what the outputRdr Read's. Since Tar archives have padding on
// the end, we want to be the one reading the padding, even if the user's
// `archive/tar` doesn't care.
pR, pW := io.Pipe()
outputRdr := io.TeeReader(r, pW)
// we need a putter that will generate the crc64 sums of file payloads
if fp == nil {
fp = storage.NewDiscardFilePutter()
}
go func() {
tr := tar.NewReader(outputRdr)
tr.RawAccounting = true
for {
hdr, err := tr.Next()
if err != nil {
if err != io.EOF {
pW.CloseWithError(err)
return
}
// even when an EOF is reached, there are often 1024 null bytes at
// the end of an archive. Collect them too.
if b := tr.RawBytes(); len(b) > 0 {
_, err := p.AddEntry(storage.Entry{
Type: storage.SegmentType,
Payload: b,
})
if err != nil {
pW.CloseWithError(err)
return
}
}
break // not return. We need the end of the reader.
}
if hdr == nil {
break // not return. We need the end of the reader.
}
if b := tr.RawBytes(); len(b) > 0 {
_, err := p.AddEntry(storage.Entry{
Type: storage.SegmentType,
Payload: b,
})
if err != nil {
pW.CloseWithError(err)
return
}
}
var csum []byte
if hdr.Size > 0 {
var err error
_, csum, err = fp.Put(hdr.Name, tr)
if err != nil {
pW.CloseWithError(err)
return
}
}
entry := storage.Entry{
Type: storage.FileType,
Size: hdr.Size,
Payload: csum,
}
// For proper marshalling of non-utf8 characters
entry.SetName(hdr.Name)
// File entries added, regardless of size
_, err = p.AddEntry(entry)
if err != nil {
pW.CloseWithError(err)
return
}
if b := tr.RawBytes(); len(b) > 0 {
_, err = p.AddEntry(storage.Entry{
Type: storage.SegmentType,
Payload: b,
})
if err != nil {
pW.CloseWithError(err)
return
}
}
}
// it is allowable, and not uncommon that there is further padding on the
// end of an archive, apart from the expected 1024 null bytes.
remainder, err := ioutil.ReadAll(outputRdr)
if err != nil && err != io.EOF {
pW.CloseWithError(err)
return
}
_, err = p.AddEntry(storage.Entry{
Type: storage.SegmentType,
Payload: remainder,
})
if err != nil {
pW.CloseWithError(err)
return
}
pW.Close()
}()
return pR, nil
}
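// A sketch of how the two halves fit together for a disassemble/reassemble
// round trip (inFile, outFile and metadata.json are hypothetical; error
// handling is abbreviated):
//
//    // disassemble: record segment/file metadata while the archive is read
//    metaOut, _ := os.Create("metadata.json")
//    packer := storage.NewJSONPacker(metaOut)
//    fgp := storage.NewBufferFileGetPutter() // in-memory FileGetPutter
//    rdr, err := asm.NewInputTarStream(inFile, packer, fgp)
//    if err != nil {
//        return err
//    }
//    io.Copy(ioutil.Discard, rdr) // drive the stream; payloads land in fgp
//
//    // reassemble: combine the stored payloads with the recorded metadata
//    metaIn, _ := os.Open("metadata.json")
//    unpacker := storage.NewJSONUnpacker(metaIn)
//    out := asm.NewOutputTarStream(fgp, unpacker)
//    io.Copy(outFile, out)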

View File

@@ -1,9 +0,0 @@
/*
Package asm provides the API for streaming assembly and disassembly of tar
archives.
Using the `github.com/vbatts/tar-split/tar/storage` for Packing/Unpacking the
metadata for a stream, as well as an implementation of Getting/Putting the file
entries' payload.
*/
package asm

View File

@@ -1,12 +0,0 @@
/*
Package storage is for metadata of a tar archive.
It packs and unpacks the Entries of the stream. An Entry is either a segment
of raw bytes (for the raw headers and various padding) or a marker for a file
payload.
The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
the file payload marker includes the name of the file, its size, and a crc64
checksum (for basic file integrity).
*/
package storage

View File

@@ -1,78 +0,0 @@
package storage
import "unicode/utf8"
// Entries is for sorting by Position
type Entries []Entry
func (e Entries) Len() int { return len(e) }
func (e Entries) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
// Type of Entry
type Type int
const (
// FileType represents a file payload from the tar stream.
//
// This will be used to map to relative paths on disk. Only entries with
// Size > 0 get read into the resulting output stream, since entries such as
// hardlinks carry no payload.
FileType Type = 1 + iota
// SegmentType represents a raw bytes segment from the archive stream. These raw
// byte segments consist of the raw headers and various padding.
//
// Its payload is marshalled base64-encoded.
SegmentType
)
// Entry is the structure for packing and unpacking the information read from
// the Tar archive.
//
// The FileType Payload checksum uses `hash/crc64` for basic file integrity,
// _not_ for cryptography.
// Per http://www.backplane.com/matt/crc64.html, CRC32 has almost 40,000
// collisions in a sample of 18.2 million, while CRC64 had none.
type Entry struct {
Type Type `json:"type"`
Name string `json:"name,omitempty"`
NameRaw []byte `json:"name_raw,omitempty"`
Size int64 `json:"size,omitempty"`
Payload []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
Position int `json:"position"`
}
// SetName will check name for valid UTF-8 string, and set the appropriate
// field. See https://github.com/vbatts/tar-split/issues/17
func (e *Entry) SetName(name string) {
if utf8.ValidString(name) {
e.Name = name
} else {
e.NameRaw = []byte(name)
}
}
// SetNameBytes will check name for valid UTF-8 string, and set the appropriate
// field
func (e *Entry) SetNameBytes(name []byte) {
if utf8.Valid(name) {
e.Name = string(name)
} else {
e.NameRaw = name
}
}
// GetName returns the string for the entry's name, regardless of which field it is stored in
func (e *Entry) GetName() string {
if len(e.NameRaw) > 0 {
return string(e.NameRaw)
}
return e.Name
}
// GetNameBytes returns the bytes for the entry's name, regardless of which field it is stored in
func (e *Entry) GetNameBytes() []byte {
if len(e.NameRaw) > 0 {
return e.NameRaw
}
return []byte(e.Name)
}

View File

@@ -1,104 +0,0 @@
package storage
import (
"bytes"
"errors"
"hash/crc64"
"io"
"os"
"path/filepath"
)
// FileGetter is the interface for getting a stream of a file payload,
// addressed by name/filename. Presumably, the names will be scoped to relative
// file paths.
type FileGetter interface {
// Get returns a stream for the provided file path
Get(filename string) (output io.ReadCloser, err error)
}
// FilePutter is the interface for storing a stream of a file payload,
// addressed by name/filename.
type FilePutter interface {
// Put returns the size of the stream received, and the crc64 checksum for
// the provided stream
Put(filename string, input io.Reader) (size int64, checksum []byte, err error)
}
// FileGetPutter is the interface that groups both Getting and Putting file
// payloads.
type FileGetPutter interface {
FileGetter
FilePutter
}
// NewPathFileGetter returns a FileGetter for files relative to the path relpath.
func NewPathFileGetter(relpath string) FileGetter {
return &pathFileGetter{root: relpath}
}
type pathFileGetter struct {
root string
}
func (pfg pathFileGetter) Get(filename string) (io.ReadCloser, error) {
return os.Open(filepath.Join(pfg.root, filename))
}
type bufferFileGetPutter struct {
files map[string][]byte
}
func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) {
if _, ok := bfgp.files[name]; !ok {
return nil, errors.New("no such file")
}
b := bytes.NewBuffer(bfgp.files[name])
return &readCloserWrapper{b}, nil
}
func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) {
crc := crc64.New(CRCTable)
buf := bytes.NewBuffer(nil)
cw := io.MultiWriter(crc, buf)
i, err := io.Copy(cw, r)
if err != nil {
return 0, nil, err
}
bfgp.files[name] = buf.Bytes()
return i, crc.Sum(nil), nil
}
type readCloserWrapper struct {
io.Reader
}
func (w *readCloserWrapper) Close() error { return nil }
// NewBufferFileGetPutter is a simple in-memory FileGetPutter
//
// The implication is that this is memory intensive;
// it is probably best suited for testing or lightweight use cases.
func NewBufferFileGetPutter() FileGetPutter {
return &bufferFileGetPutter{
files: map[string][]byte{},
}
}
// NewDiscardFilePutter is a bit bucket FilePutter
func NewDiscardFilePutter() FilePutter {
return &bitBucketFilePutter{}
}
type bitBucketFilePutter struct {
}
func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
c := crc64.New(CRCTable)
i, err := io.Copy(c, r)
return i, c.Sum(nil), err
}
// CRCTable is the default table used for crc64 sum calculations
var CRCTable = crc64.MakeTable(crc64.ISO)
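// A small usage sketch (name and contents are hypothetical): stash a payload
// and read it back, receiving its size and crc64 (ISO table) checksum.
//
//    fgp := NewBufferFileGetPutter()
//    size, csum, err := fgp.Put("etc/app.conf", strings.NewReader("key=value\n"))
//    if err != nil {
//        return err
//    }
//    rc, err := fgp.Get("etc/app.conf")
//    if err != nil {
//        return err
//    }
//    defer rc.Close()
//    _ = size // 10 bytes stored
//    _ = csum // crc64 checksum of the payload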

View File

@@ -1,127 +0,0 @@
package storage
import (
"encoding/json"
"errors"
"io"
"path/filepath"
"unicode/utf8"
)
// ErrDuplicatePath occurs when a tar archive has more than one entry for the
// same file path
var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
// Packer describes the methods to pack Entries to a storage destination
type Packer interface {
// AddEntry packs the Entry and returns its position
AddEntry(e Entry) (int, error)
}
// Unpacker describes the methods to read Entries from a source
type Unpacker interface {
// Next returns the next Entry being unpacked, or error, until io.EOF
Next() (*Entry, error)
}
/* TODO(vbatts) figure out a good model for this
type PackUnpacker interface {
Packer
Unpacker
}
*/
type jsonUnpacker struct {
seen seenNames
dec *json.Decoder
}
func (jup *jsonUnpacker) Next() (*Entry, error) {
var e Entry
err := jup.dec.Decode(&e)
if err != nil {
return nil, err
}
// check for dup name
if e.Type == FileType {
cName := filepath.Clean(e.GetName())
if _, ok := jup.seen[cName]; ok {
return nil, ErrDuplicatePath
}
jup.seen[cName] = struct{}{}
}
return &e, err
}
// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
// FileType) as a json document.
//
// Each Entry read is expected to be delimited by a newline.
func NewJSONUnpacker(r io.Reader) Unpacker {
return &jsonUnpacker{
dec: json.NewDecoder(r),
seen: seenNames{},
}
}
type jsonPacker struct {
w io.Writer
e *json.Encoder
pos int
seen seenNames
}
type seenNames map[string]struct{}
func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
// if Name is not valid utf8, switch it to raw first.
if e.Name != "" {
if !utf8.ValidString(e.Name) {
e.NameRaw = []byte(e.Name)
e.Name = ""
}
}
// check early for dup name
if e.Type == FileType {
cName := filepath.Clean(e.GetName())
if _, ok := jp.seen[cName]; ok {
return -1, ErrDuplicatePath
}
jp.seen[cName] = struct{}{}
}
e.Position = jp.pos
err := jp.e.Encode(e)
if err != nil {
return -1, err
}
// made it this far, increment now
jp.pos++
return e.Position, nil
}
// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
// FileType) as a json document.
//
// The Entries are delimited by new line.
func NewJSONPacker(w io.Writer) Packer {
return &jsonPacker{
w: w,
e: json.NewEncoder(w),
seen: seenNames{},
}
}
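// A minimal sketch of the wire format (hypothetical values): each AddEntry
// call appends one JSON document per line, so a packed stream looks like
//
//    {"type":2,"payload":"<base64 of raw header bytes>","position":0}
//    {"type":1,"name":"etc/app.conf","size":10,"payload":"<base64 crc64 sum>","position":1}
//
// and NewJSONUnpacker over the same stream yields the Entries back in order,
// returning io.EOF when the input is exhausted.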
/*
TODO(vbatts) perhaps have a more compact packer/unpacker, maybe using msgpack
(https://github.com/ugorji/go)
Even though, since our jsonUnpacker and jsonPacker just take
io.Reader/io.Writer, then we can get away with passing them a
gzip.Reader/gzip.Writer
*/