diff --git a/docker/inspect.go b/docker/inspect.go index d33e05d5..5d29d9e0 100644 --- a/docker/inspect.go +++ b/docker/inspect.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "path/filepath" "strings" "time" @@ -19,6 +20,7 @@ import ( "github.com/docker/docker/registry" engineTypes "github.com/docker/engine-api/types" registryTypes "github.com/docker/engine-api/types/registry" + "github.com/opencontainers/runc/libcontainer/user" "github.com/runcom/skopeo/types" "golang.org/x/net/context" ) @@ -195,6 +197,7 @@ func getAuthConfig(c *cli.Context, index *registryTypes.IndexInfo) (engineTypes. } ) + // TODO(runcom): ??? atomic needs this // TODO(runcom): implement this to opt-in for docker-cfg, no need to make this // work by default with docker's conf //useDockerConf := c.GlobalString("use-docker-cfg") @@ -203,6 +206,16 @@ func getAuthConfig(c *cli.Context, index *registryTypes.IndexInfo) (engineTypes. return defAuthConfig, nil } + sudoUserEnv := os.Getenv("SUDO_USER") + if sudoUserEnv != "" { + sudoUser, err := user.LookupUser(sudoUserEnv) + if err != nil { + return engineTypes.AuthConfig{}, err + } + // override the given docker conf file if called with sudo + cfg = filepath.Join(sudoUser.Home, ".docker") + } + if _, err := os.Stat(cfg); err != nil { logrus.Debugf("Docker cli config file %q not found: %v, falling back to --username and --password if needed", cfg, err) if os.IsNotExist(err) { diff --git a/hack/vendor.sh b/hack/vendor.sh index 83beaf73..2a85b5de 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -6,19 +6,21 @@ rm -rf vendor/ source 'hack/.vendor-helpers.sh' clone git github.com/codegangsta/cli v1.2.0 -clone git github.com/Sirupsen/logrus v0.8.7 # logrus is a common dependency among multiple deps -clone git github.com/docker/docker master +clone git github.com/Sirupsen/logrus v0.8.7 +clone git github.com/vbatts/tar-split master +clone git github.com/gorilla/mux master +clone git github.com/gorilla/context master clone git golang.org/x/net master https://github.com/golang/net.git -clone git github.com/docker/engine-api master -clone git github.com/docker/distribution v2.3.0-rc.1 clone git github.com/go-check/check v1 + +clone git github.com/docker/docker v1.10.1 +clone git github.com/docker/engine-api v0.2.3 +clone git github.com/docker/distribution 0f2d99b13ae0cfbcf118eff103e6e680b726b47e + clone git github.com/docker/go-connections master clone git github.com/docker/go-units master clone git github.com/docker/libtrust master clone git github.com/opencontainers/runc master -clone git github.com/vbatts/tar-split master -clone git github.com/gorilla/mux master -clone git github.com/gorilla/context master clean diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go index 222916b8..1acb0500 100644 --- a/vendor/github.com/docker/distribution/manifests.go +++ b/vendor/github.com/docker/distribution/manifests.go @@ -2,7 +2,7 @@ package distribution import ( "fmt" - "strings" + "mime" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" @@ -84,19 +84,23 @@ var mappings = make(map[string]UnmarshalFunc, 0) // UnmarshalManifest looks up manifest unmarshall functions based on // MediaType func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual content type, not the raw contents of + // Need to look up by the actual media type, not the raw contents of // the header. Strip semicolons and anything following them. 
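// (Annotation, not part of the upstream patch: mime.ParseMediaType does the
// semicolon stripping that the removed strings.Index code did by hand, and
// also lowercases and validates the type. For example, with Go's standard
// mime package:
//
//	mt, params, err := mime.ParseMediaType(
//		"application/vnd.docker.distribution.manifest.v2+json; charset=utf-8")
//	// mt == "application/vnd.docker.distribution.manifest.v2+json"
//	// params["charset"] == "utf-8", err == nil
// )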
var mediatype string - semicolonIndex := strings.Index(ctHeader, ";") - if semicolonIndex != -1 { - mediatype = ctHeader[:semicolonIndex] - } else { - mediatype = ctHeader + if ctHeader != "" { + var err error + mediatype, _, err = mime.ParseMediaType(ctHeader) + if err != nil { + return nil, Descriptor{}, err + } } unmarshalFunc, ok := mappings[mediatype] if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype: %s", mediatype) + unmarshalFunc, ok = mappings[""] + if !ok { + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + } } return unmarshalFunc(p) diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index c188472a..6f079cbb 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -6,7 +6,7 @@ // reference := repository [ ":" tag ] [ "@" digest ] // name := [hostname '/'] component ['/' component]* // hostname := hostcomponent ['.' hostcomponent]* [':' port-number] -// hostcomponent := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ +// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go index a4ffe5b6..b465abf5 100644 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -22,7 +22,7 @@ var ( // hostnameComponentRegexp restricts the registry hostname component of a // repository name to start with a component as defined by hostnameRegexp // and followed by an optional port. - hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) + hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) // hostnameRegexp defines the structure of potential hostname components // that may be part of image names. This is purposely a subset of what is diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 51be1e27..96064a83 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -18,7 +18,7 @@ import ( // Common constants for daemon and client. 
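// (Annotation on the reference/regexp.go change above, not part of the
// upstream patch: widening hostcomponent to [a-zA-Z0-9] lets mixed-case
// registry hostnames parse. A minimal check with Go's regexp package:
//
//	re := regexp.MustCompile(`^(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])$`)
//	re.MatchString("MyRegistry") // true with the new pattern; the old
//	                             // lowercase-only pattern rejected it
// )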
const ( // Version of Current REST API - DefaultVersion version.Version = "1.23" + DefaultVersion version.Version = "1.22" // MinVersion represents Minimum REST API version supported MinVersion version.Version = "1.12" diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go index 7bb17100..71a1c4e5 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v2.go +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -171,6 +171,10 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre _, err = io.Copy(tmpFile, io.TeeReader(reader, verifier)) if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } return nil, 0, retryOnError(err) } @@ -179,8 +183,9 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre if !verifier.Verified() { err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) logrus.Error(err) + tmpFile.Close() - if err := os.RemoveAll(tmpFile.Name()); err != nil { + if err := os.Remove(tmpFile.Name()); err != nil { logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) } @@ -191,7 +196,14 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) - tmpFile.Seek(0, 0) + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return nil, 0, xfer.DoNotRetry{Err: err} + } return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil } diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go index 445f6bb6..ecf59ec2 100644 --- a/vendor/github.com/docker/docker/distribution/push.go +++ b/vendor/github.com/docker/docker/distribution/push.go @@ -171,7 +171,14 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo // argument so that it can be used with httpBlobWriter's ReadFrom method. // Using httpBlobWriter's Write method would send a PATCH request for every // Write call. -func compress(in io.Reader) io.ReadCloser { +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + pipeReader, pipeWriter := io.Pipe() // Use a bufio.Writer to avoid excessive chunking in HTTP request. bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) @@ -190,7 +197,8 @@ func compress(in io.Reader) io.ReadCloser { } else { pipeWriter.Close() } + close(compressionDone) }() - return pipeReader + return pipeReader, compressionDone } diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go index 76d43470..e5f9de81 100644 --- a/vendor/github.com/docker/docker/distribution/push_v2.go +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -345,8 +345,11 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. 
size, _ := pd.layer.DiffSize() reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing") - defer reader.Close() - compressedReader := compress(reader) + compressedReader, compressionDone := compress(reader) + defer func() { + reader.Close() + <-compressionDone + }() digester := digest.Canonical.New() tee := io.TeeReader(compressedReader, digester.Hash()) diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go index 7f323c1d..68522254 100644 --- a/vendor/github.com/docker/docker/distribution/xfer/transfer.go +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer.go @@ -1,6 +1,7 @@ package xfer import ( + "runtime" "sync" "github.com/docker/docker/pkg/progress" @@ -38,7 +39,7 @@ type Transfer interface { Watch(progressOutput progress.Output) *Watcher Release(*Watcher) Context() context.Context - Cancel() + Close() Done() <-chan struct{} Released() <-chan struct{} Broadcast(masterProgressChan <-chan progress.Progress) @@ -61,11 +62,14 @@ type transfer struct { // running remains open as long as the transfer is in progress. running chan struct{} - // hasWatchers stays open until all watchers release the transfer. - hasWatchers chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} // broadcastDone is true if the master progress channel has closed. broadcastDone bool + // closed is true if Close has been called + closed bool // broadcastSyncChan allows watchers to "ping" the broadcasting // goroutine to wait for it for deplete its input channel. This ensures // a detaching watcher won't miss an event that was sent before it @@ -78,7 +82,7 @@ func NewTransfer() Transfer { t := &transfer{ watchers: make(map[chan struct{}]*Watcher), running: make(chan struct{}), - hasWatchers: make(chan struct{}), + released: make(chan struct{}), broadcastSyncChan: make(chan struct{}), } @@ -144,13 +148,13 @@ func (t *transfer) Watch(progressOutput progress.Output) *Watcher { running: make(chan struct{}), } + t.watchers[w.releaseChan] = w + if t.broadcastDone { close(w.running) return w } - t.watchers[w.releaseChan] = w - go func() { defer func() { close(w.running) @@ -202,8 +206,19 @@ func (t *transfer) Release(watcher *Watcher) { delete(t.watchers, watcher.releaseChan) if len(t.watchers) == 0 { - close(t.hasWatchers) - t.cancel() + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } } t.mu.Unlock() @@ -223,9 +238,9 @@ func (t *transfer) Done() <-chan struct{} { } // Released returns a channel which is closed once all watchers release the -// transfer. +// transfer AND the transfer is no longer tracked by the transfer manager. func (t *transfer) Released() <-chan struct{} { - return t.hasWatchers + return t.released } // Context returns the context associated with the transfer. @@ -233,9 +248,15 @@ func (t *transfer) Context() context.Context { return t.ctx } -// Cancel cancels the context associated with the transfer. -func (t *transfer) Cancel() { - t.cancel() +// Close is called by the transfer manager when the transfer is no longer +// being tracked. 
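// (Annotation, not part of the upstream patch: Released() now closes only
// after BOTH conditions hold -- every watcher has been released and the
// manager has called Close. A minimal caller-side sketch, assuming the
// Transfer interface above:
//
//	w := xfer.Watch(progressOutput)
//	// ... observe progress ...
//	xfer.Release(w)   // releasing the last watcher cancels the context
//	<-xfer.Released() // unblocks once the manager has also called Close
// )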
+func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() } // DoFunc is a function called by the transfer manager to actually perform @@ -280,10 +301,33 @@ func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput tm.mu.Lock() defer tm.mu.Unlock() - if xfer, present := tm.transfers[key]; present { + for { + xfer, present := tm.transfers[key] + if !present { + break + } // Transfer is already in progress. watcher := xfer.Watch(progressOutput) - return xfer, watcher + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. + xfer.Release(watcher) + tm.mu.Unlock() + // The goroutine that removes this transfer from the + // map is also waiting for xfer.Done(), so yield to it. + // This could be avoided by adding a Closed method + // to Transfer to allow explicitly waiting for it to be + // removed the map, but forcing a scheduling round in + // this very rare case seems better than bloating the + // interface definition. + runtime.Gosched() + <-xfer.Done() + tm.mu.Lock() + default: + return xfer, watcher + } } start := make(chan struct{}) @@ -318,6 +362,7 @@ func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput } delete(tm.transfers, key) tm.mu.Unlock() + xfer.Close() return } } diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go index 302be2f3..1279f59b 100644 --- a/vendor/github.com/docker/docker/image/store.go +++ b/vendor/github.com/docker/docker/image/store.go @@ -231,6 +231,9 @@ func (is *store) SetParent(id, parent ID) error { if parentMeta == nil { return fmt.Errorf("unknown parent image ID %s", parent.String()) } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } parentMeta.children[id] = struct{}{} return is.fs.SetMetadata(id, "parent", []byte(parent)) } diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go index 9779ab79..ac0f0065 100644 --- a/vendor/github.com/docker/docker/layer/migration.go +++ b/vendor/github.com/docker/docker/layer/migration.go @@ -32,7 +32,7 @@ func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent } if !ls.driver.Exists(graphID) { - return errors.New("graph ID does not exist") + return fmt.Errorf("graph ID does not exist: %q", graphID) } var p *roLayer diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go index 51e0921d..92b0ea0e 100644 --- a/vendor/github.com/docker/docker/layer/ro_layer.go +++ b/vendor/github.com/docker/docker/layer/ro_layer.go @@ -1,6 +1,11 @@ package layer -import "io" +import ( + "fmt" + "io" + + "github.com/docker/distribution/digest" +) type roLayer struct { chainID ChainID @@ -29,7 +34,11 @@ func (rl *roLayer) TarStream() (io.ReadCloser, error) { pw.Close() } }() - return pr, nil + rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return rc, nil } func (rl *roLayer) ChainID() ChainID { @@ -117,3 +126,39 @@ func storeLayer(tx MetadataTransaction, layer *roLayer) error { return nil } + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + return 
&verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: verifier, + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/vendor/github.com/docker/docker/pkg/mflag/flag.go b/vendor/github.com/docker/docker/pkg/mflag/flag.go index e2a0c422..e7fabe0a 100644 --- a/vendor/github.com/docker/docker/pkg/mflag/flag.go +++ b/vendor/github.com/docker/docker/pkg/mflag/flag.go @@ -1163,7 +1163,7 @@ func (fs *FlagSet) ReportError(str string, withHelp bool) { str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" } } - fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str) + fmt.Fprintf(fs.Out(), "docker: %s.\n", str) } // Parsed reports whether fs.Parse has been called. diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go index bd11f4e3..18f2d735 100644 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -3,20 +3,21 @@ package term import ( + "fmt" "io" "os" "os/signal" "syscall" "github.com/Azure/go-ansiterm/winterm" + "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/term/windows" ) // State holds the console mode for the terminal. type State struct { - inMode, outMode uint32 - inHandle, outHandle syscall.Handle + mode uint32 } // Winsize is used for window size. @@ -27,27 +28,17 @@ type Winsize struct { y uint16 } -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 -) - -// usingNativeConsole is true if we are using the Windows native console -var usingNativeConsole bool - // StdStreams returns the standard streams (stdin, stdout, stedrr). func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { switch { case os.Getenv("ConEmuANSI") == "ON": - // The ConEmu shell emulates ANSI well by default. - return os.Stdin, os.Stdout, os.Stderr + // The ConEmu terminal emulates ANSI on output streams well. + return windows.ConEmuStreams() case os.Getenv("MSYSTEM") != "": // MSYS (mingw) does not emulate ANSI well. return windows.ConsoleStreams() default: if useNativeConsole() { - usingNativeConsole = true return os.Stdin, os.Stdout, os.Stderr } return windows.ConsoleStreams() @@ -63,7 +54,7 @@ func useNativeConsole() bool { return false } - // Native console is not available before major version 10 + // Native console is not available major version 10 if osv.MajorVersion < 10 { return false } @@ -73,17 +64,6 @@ func useNativeConsole() bool { return false } - // Get the console modes. If this fails, we can't use the native console - state, err := getNativeConsole() - if err != nil { - return false - } - - // Probe the console to see if it can be enabled. 
- if nil != probeNativeConsole(state) { - return false - } - // Environment variable override if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" { if e == "1" { @@ -92,86 +72,32 @@ func useNativeConsole() bool { return false } - // TODO Windows. The native emulator still has issues which + // Get the handle to stdout + stdOutHandle, err := syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE) + if err != nil { + return false + } + + // Get the console mode from the consoles stdout handle + var mode uint32 + if err := syscall.GetConsoleMode(stdOutHandle, &mode); err != nil { + return false + } + + // Legacy mode does not have native ANSI emulation. + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + const enableVirtualTerminalProcessing = 0x0004 + if mode&enableVirtualTerminalProcessing == 0 { + return false + } + + // TODO Windows (Post TP4). The native emulator still has issues which // mean it shouldn't be enabled for everyone. Change this next line to true // to change the default to "enable if available". In the meantime, users // can still try it out by using USE_NATIVE_CONSOLE env variable. return false } -// getNativeConsole returns the console modes ('state') for the native Windows console -func getNativeConsole() (State, error) { - var ( - err error - state State - ) - - // Get the handle to stdout - if state.outHandle, err = syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE); err != nil { - return state, err - } - - // Get the console mode from the consoles stdout handle - if err = syscall.GetConsoleMode(state.outHandle, &state.outMode); err != nil { - return state, err - } - - // Get the handle to stdin - if state.inHandle, err = syscall.GetStdHandle(syscall.STD_INPUT_HANDLE); err != nil { - return state, err - } - - // Get the console mode from the consoles stdin handle - if err = syscall.GetConsoleMode(state.inHandle, &state.inMode); err != nil { - return state, err - } - - return state, nil -} - -// probeNativeConsole probes the console to determine if native can be supported, -func probeNativeConsole(state State) error { - if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil { - return err - } - defer winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) - - if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil { - return err - } - defer winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode) - - return nil -} - -// enableNativeConsole turns on native console mode -func enableNativeConsole(state State) error { - if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil { - return err - } - - if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil { - winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) // restore out if we can - return err - } - - return nil -} - -// disableNativeConsole turns off native console mode -func disableNativeConsole(state *State) error { - // Try and restore both in an out before error checking. - errout := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) - errin := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode) - if errout != nil { - return errout - } - if errin != nil { - return errin - } - return nil -} - // GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. 
func GetFdInfo(in interface{}) (uintptr, bool) { return windows.GetHandleInfo(in) @@ -179,6 +105,7 @@ func GetFdInfo(in interface{}) (uintptr, bool) { // GetWinsize returns the window size based on the specified file descriptor. func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) if err != nil { return nil, err @@ -190,9 +117,58 @@ func GetWinsize(fd uintptr) (*Winsize, error) { x: 0, y: 0} + // Note: GetWinsize is called frequently -- uncomment only for excessive details + // logrus.Debugf("[windows] GetWinsize: Console(%v)", info.String()) + // logrus.Debugf("[windows] GetWinsize: Width(%v), Height(%v), x(%v), y(%v)", winsize.Width, winsize.Height, winsize.x, winsize.y) return winsize, nil } +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + + // Ensure the requested dimensions are no larger than the maximum window size + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return err + } + + if ws.Width == 0 || ws.Height == 0 || ws.Width > uint16(info.MaximumWindowSize.X) || ws.Height > uint16(info.MaximumWindowSize.Y) { + return fmt.Errorf("Illegal window size: (%v,%v) -- Maximum allow: (%v,%v)", + ws.Width, ws.Height, info.MaximumWindowSize.X, info.MaximumWindowSize.Y) + } + + // Narrow the sizes to that used by Windows + width := winterm.SHORT(ws.Width) + height := winterm.SHORT(ws.Height) + + // Set the dimensions while ensuring they remain within the bounds of the backing console buffer + // -- Shrinking will always succeed. Growing may push the edges past the buffer boundary. When that occurs, + // shift the upper left just enough to keep the new window within the buffer. + rect := info.Window + if width < rect.Right-rect.Left+1 { + rect.Right = rect.Left + width - 1 + } else if width > rect.Right-rect.Left+1 { + rect.Right = rect.Left + width - 1 + if rect.Right >= info.Size.X { + rect.Left = info.Size.X - width + rect.Right = info.Size.X - 1 + } + } + + if height < rect.Bottom-rect.Top+1 { + rect.Bottom = rect.Top + height - 1 + } else if height > rect.Bottom-rect.Top+1 { + rect.Bottom = rect.Top + height - 1 + if rect.Bottom >= info.Size.Y { + rect.Top = info.Size.Y - height + rect.Bottom = info.Size.Y - 1 + } + } + logrus.Debugf("[windows] SetWinsize: Requested((%v,%v)) Actual(%v)", ws.Width, ws.Height, rect) + + return winterm.SetConsoleWindowInfo(fd, true, rect) +} + // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { return windows.IsConsole(fd) @@ -201,36 +177,25 @@ func IsTerminal(fd uintptr) bool { // RestoreTerminal restores the terminal connected to the given file descriptor // to a previous state. func RestoreTerminal(fd uintptr, state *State) error { - if usingNativeConsole { - return disableNativeConsole(state) - } - return winterm.SetConsoleMode(fd, state.outMode) + return winterm.SetConsoleMode(fd, state.mode) } // SaveState saves the state of the terminal connected to the given file descriptor. func SaveState(fd uintptr) (*State, error) { - if usingNativeConsole { - state, err := getNativeConsole() - if err != nil { - return nil, err - } - return &state, nil - } - mode, e := winterm.GetConsoleMode(fd) if e != nil { return nil, e } - - return &State{outMode: mode}, nil + return &State{mode}, nil } // DisableEcho disables echo for the terminal connected to the given file descriptor. 
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx func DisableEcho(fd uintptr, state *State) error { - mode := state.inMode + mode := state.mode mode &^= winterm.ENABLE_ECHO_INPUT mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) if err != nil { return err @@ -262,17 +227,10 @@ func MakeRaw(fd uintptr) (*State, error) { return nil, err } - mode := state.inMode - if usingNativeConsole { - if err := enableNativeConsole(*state); err != nil { - return nil, err - } - mode |= enableVirtualTerminalInput - } - // See // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + mode := state.mode // Disable these modes mode &^= winterm.ENABLE_ECHO_INPUT diff --git a/vendor/github.com/docker/docker/reference/reference.go b/vendor/github.com/docker/docker/reference/reference.go index 1e348247..e355596e 100644 --- a/vendor/github.com/docker/docker/reference/reference.go +++ b/vendor/github.com/docker/docker/reference/reference.go @@ -1,6 +1,7 @@ package reference import ( + "errors" "fmt" "strings" @@ -72,7 +73,10 @@ func ParseNamed(s string) (Named, error) { // WithName returns a named object representing the given string. If the input // is invalid ErrReferenceInvalidFormat will be returned. func WithName(name string) (Named, error) { - name = normalize(name) + name, err := normalize(name) + if err != nil { + return nil, err + } if err := validateName(name); err != nil { return nil, err } @@ -172,15 +176,18 @@ func splitHostname(name string) (hostname, remoteName string) { // normalize returns a repository name in its normalized form, meaning it // will not contain default hostname nor library/ prefix for official images. -func normalize(name string) string { +func normalize(name string) (string, error) { host, remoteName := splitHostname(name) + if strings.ToLower(remoteName) != remoteName { + return "", errors.New("invalid reference format: repository name must be lowercase") + } if host == DefaultHostname { if strings.HasPrefix(remoteName, DefaultRepoPrefix) { - return strings.TrimPrefix(remoteName, DefaultRepoPrefix) + return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil } - return remoteName + return remoteName, nil } - return name + return name, nil } func validateName(name string) error { diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go index bacc4aed..d3f78108 100644 --- a/vendor/github.com/docker/docker/registry/registry.go +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -31,7 +31,6 @@ var ( // ErrAlreadyExists is an error returned if an image being pushed // already exists on the remote side ErrAlreadyExists = errors.New("Image already exists") - errLoginRequired = errors.New("Authentication is required.") ) // dockerUserAgent is the User-Agent the Docker client uses to identify itself. 
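Note on the errLoginRequired removal above: the session.go hunks below replace
the package-local sentinel with docker/distribution's typed
errcode.ErrorCodeUnauthorized. A minimal sketch of detecting the typed error
on the caller side (the helper name isUnauthorized is illustrative, not part
of this patch), assuming the vendored errcode package:

	import "github.com/docker/distribution/registry/api/errcode"

	func isUnauthorized(err error) bool {
		if e, ok := err.(errcode.Error); ok {
			return e.ErrorCode() == errcode.ErrorCodeUnauthorized
		}
		return false
	}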
diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go index 57acbc0c..4b18d0d1 100644 --- a/vendor/github.com/docker/docker/registry/session.go +++ b/vendor/github.com/docker/docker/registry/session.go @@ -19,6 +19,7 @@ import ( "strings" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringid" @@ -213,7 +214,7 @@ func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { defer res.Body.Close() if res.StatusCode != 200 { if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } @@ -427,7 +428,7 @@ func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, erro } defer res.Body.Close() if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. @@ -661,7 +662,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, defer res.Body.Close() if res.StatusCode == 401 { - return nil, errLoginRequired + return nil, errcode.ErrorCodeUnauthorized.WithArgs() } var tokens, endpoints []string diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go index 81eb29c9..16c1cb10 100644 --- a/vendor/github.com/docker/engine-api/types/client.go +++ b/vendor/github.com/docker/engine-api/types/client.go @@ -7,7 +7,6 @@ import ( "github.com/docker/engine-api/types/container" "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/image" "github.com/docker/go-units" ) @@ -127,7 +126,7 @@ type ImageBuildOptions struct { NoCache bool Remove bool ForceRemove bool - PullParent image.PullBehavior + PullParent bool IsolationLevel container.IsolationLevel CPUSetCPUs string CPUSetMems string diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go index b7c459ea..f43263d6 100644 --- a/vendor/github.com/docker/engine-api/types/container/host_config.go +++ b/vendor/github.com/docker/engine-api/types/container/host_config.go @@ -151,11 +151,6 @@ func (rp *RestartPolicy) IsUnlessStopped() bool { return rp.Name == "unless-stopped" } -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - // LogConfig represents the logging configuration of the container. 
type LogConfig struct { Type string @@ -183,7 +178,7 @@ type Resources struct { KernelMemory int64 // Kernel memory limit (in bytes) Memory int64 // Memory limit (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap MemorySwappiness *int64 // Tuning container memory swappiness behaviour OomKillDisable *bool // Whether to disable OOM Killer or not PidsLimit int64 // Setting pids limit for a container @@ -195,7 +190,6 @@ type Resources struct { type UpdateConfig struct { // Contains container's resources (cgroups, ulimits) Resources - RestartPolicy RestartPolicy } // HostConfig the non-portable Config structure of a container. diff --git a/vendor/github.com/docker/engine-api/types/image/pull_behavior.go b/vendor/github.com/docker/engine-api/types/image/pull_behavior.go deleted file mode 100644 index d30df2ce..00000000 --- a/vendor/github.com/docker/engine-api/types/image/pull_behavior.go +++ /dev/null @@ -1,41 +0,0 @@ -package image - -import ( - "fmt" -) - -// PullBehavior can be one of: never, always, or missing -type PullBehavior int - -// PullBehavior can be one of: never, always, or missing -const ( - PullNever PullBehavior = iota - PullAlways - PullMissing -) - -// ParsePullBehavior validates and converts a string into a PullBehavior -func ParsePullBehavior(pullVal string) (PullBehavior, error) { - switch pullVal { - case "never": - return PullNever, nil - case "always": - return PullAlways, nil - case "missing", "": - return PullMissing, nil - } - return PullNever, fmt.Errorf("Invalid pull behavior '%s'", pullVal) -} - -// String returns a string representation of a PullBehavior -func (p PullBehavior) String() string { - switch p { - case PullNever: - return "never" - case PullAlways: - return "always" - case PullMissing: - return "missing" - } - return "" -} diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go index de8b0bec..64c99818 100644 --- a/vendor/github.com/docker/engine-api/types/types.go +++ b/vendor/github.com/docker/engine-api/types/types.go @@ -142,7 +142,6 @@ type Container struct { SizeRw int64 `json:",omitempty"` SizeRootFs int64 `json:",omitempty"` Labels map[string]string - State string Status string HostConfig struct { NetworkMode string `json:",omitempty"` @@ -224,6 +223,8 @@ type Info struct { Architecture string IndexServerAddress string RegistryConfig *registry.ServiceConfig + InitSha1 string + InitPath string NCPU int MemTotal int64 DockerRootDir string @@ -388,7 +389,6 @@ type NetworkResource struct { Scope string Driver string IPAM network.IPAM - Internal bool Containers map[string]EndpointResource Options map[string]string } diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index c08e53a0..9378c358 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -85,7 +85,7 @@ func certPool(caFile string) (*x509.CertPool, error) { func Client(options Options) (*tls.Config, error) { tlsConfig := ClientDefault tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify { + if !options.InsecureSkipVerify && options.CAFile != "" { CAs, err := certPool(options.CAFile) if err != nil { return nil, err @@ -93,7 +93,7 @@ func 
Client(options Options) (*tls.Config, error) { tlsConfig.RootCAs = CAs } - if options.CertFile != "" && options.KeyFile != "" { + if options.CertFile != "" || options.KeyFile != "" { tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) if err != nil { return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go index 3b59daff..989edd29 100644 --- a/vendor/github.com/docker/go-units/size.go +++ b/vendor/github.com/docker/go-units/size.go @@ -31,7 +31,7 @@ type unitMap map[string]int64 var ( decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) ) var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} @@ -77,19 +77,19 @@ func RAMInBytes(size string) (int64, error) { // Parses the human-readable size string into the amount it represents. func parseSize(sizeStr string, uMap unitMap) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 3 { + if len(matches) != 4 { return -1, fmt.Errorf("invalid size: '%s'", sizeStr) } - size, err := strconv.ParseInt(matches[1], 10, 0) + size, err := strconv.ParseFloat(matches[1], 64) if err != nil { return -1, err } - unitPrefix := strings.ToLower(matches[2]) + unitPrefix := strings.ToLower(matches[3]) if mul, ok := uMap[unitPrefix]; ok { - size *= mul + size *= float64(mul) } - return size, nil + return int64(size), nil } diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go index c31df062..36f4e239 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/common.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go @@ -327,3 +327,14 @@ func toASCII(s string) string { } return buf.String() } + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 4168ea2c..a8b63a27 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -12,6 +12,7 @@ import ( "errors" "io" "io/ioutil" + "math" "os" "strconv" "strings" @@ -39,6 +40,10 @@ type Reader struct { rawBytes *bytes.Buffer // last raw bits } +type parser struct { + err error // Last error seen +} + // RawBytes accesses the raw bytes of the archive, apart from the file payload itself. // This includes the header and padding. // @@ -70,12 +75,36 @@ type regFileReader struct { nb int64 // number of unread bytes for current file entry } -// A sparseFileReader is a numBytesReader for reading sparse file data from a tar archive. +// A sparseFileReader is a numBytesReader for reading sparse file data from a +// tar archive. 
type sparseFileReader struct { - rfr *regFileReader // reads the sparse-encoded file data - sp []sparseEntry // the sparse map for the file - pos int64 // keeps track of file position - tot int64 // total size of the file + rfr numBytesReader // Reads the sparse-encoded file data + sp []sparseEntry // The sparse map for the file + pos int64 // Keeps track of file position + total int64 // Total size of the file +} + +// A sparseEntry holds a single entry in a sparse file's sparse map. +// +// Sparse files are represented using a series of sparseEntrys. +// Despite the name, a sparseEntry represents an actual data fragment that +// references data found in the underlying archive stream. All regions not +// covered by a sparseEntry are logically filled with zeros. +// +// For example, if the underlying raw file contains the 10-byte data: +// var compactData = "abcdefgh" +// +// And the sparse map has the following entries: +// var sp = []sparseEntry{ +// {offset: 2, numBytes: 5} // Data fragment for [2..7] +// {offset: 18, numBytes: 3} // Data fragment for [18..21] +// } +// +// Then the content of the resulting sparse file with a "real" size of 25 is: +// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 +type sparseEntry struct { + offset int64 // Starting position of the fragment + numBytes int64 // Length of the fragment } // Keywords for GNU sparse files in a PAX extended header @@ -109,7 +138,6 @@ func NewReader(r io.Reader) *Reader { return &Reader{r: r} } // // io.EOF is returned at the end of the input. func (tr *Reader) Next() (*Header, error) { - var hdr *Header if tr.RawAccounting { if tr.rawBytes == nil { tr.rawBytes = bytes.NewBuffer(nil) @@ -117,98 +145,88 @@ func (tr *Reader) Next() (*Header, error) { tr.rawBytes.Reset() } } - if tr.err == nil { - tr.skipUnread() - } + if tr.err != nil { - return hdr, tr.err + return nil, tr.err } - hdr = tr.readHeader() - if hdr == nil { - return hdr, tr.err - } - // Check for PAX/GNU header. - switch hdr.Typeflag { - case TypeXHeader: - // PAX extended header - headers, err := parsePAX(tr) - if err != nil { - return nil, err - } - // We actually read the whole file, - // but this skips alignment padding - tr.skipUnread() + + var hdr *Header + var extHdrs map[string]string + + // Externally, Next iterates through the tar archive as if it is a series of + // files. Internally, the tar format often uses fake "files" to add meta + // data that describes the next file. These meta data "files" should not + // normally be visible to the outside. As such, this loop iterates through + // one or more "header files" until it finds a "normal file". +loop: + for { + tr.err = tr.skipUnread() if tr.err != nil { return nil, tr.err } + hdr = tr.readHeader() - if hdr == nil { + if tr.err != nil { return nil, tr.err } - mergePAX(hdr, headers) + // Check for PAX/GNU special headers and files. + switch hdr.Typeflag { + case TypeXHeader: + extHdrs, tr.err = parsePAX(tr) + if tr.err != nil { + return nil, tr.err + } + continue loop // This is a meta header affecting the next header + case TypeGNULongName, TypeGNULongLink: + var realname []byte + realname, tr.err = ioutil.ReadAll(tr) + if tr.err != nil { + return nil, tr.err + } - // Check for a PAX format sparse file - sp, err := tr.checkForGNUSparsePAXHeaders(hdr, headers) - if err != nil { - tr.err = err - return nil, err - } - if sp != nil { - // Current file is a PAX format GNU sparse file. - // Set the current file reader to a sparse file reader. 
- tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} - } - return hdr, nil - case TypeGNULongName: - // We have a GNU long name header. Its contents are the real file name. - realname, err := ioutil.ReadAll(tr) - if err != nil { - return nil, err - } - var buf []byte - if tr.RawAccounting { - if _, err = tr.rawBytes.Write(realname); err != nil { + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(realname); tr.err != nil { + return nil, tr.err + } + } + + // Convert GNU extensions to use PAX headers. + if extHdrs == nil { + extHdrs = make(map[string]string) + } + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + extHdrs[paxPath] = p.parseString(realname) + case TypeGNULongLink: + extHdrs[paxLinkpath] = p.parseString(realname) + } + if p.err != nil { + tr.err = p.err + return nil, tr.err + } + continue loop // This is a meta header affecting the next header + default: + mergePAX(hdr, extHdrs) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) + if err != nil { + tr.err = err return nil, err } - buf = make([]byte, tr.rawBytes.Len()) - copy(buf[:], tr.RawBytes()) - } - hdr, err := tr.Next() - // since the above call to Next() resets the buffer, we need to throw the bytes over - if tr.RawAccounting { - buf = append(buf, tr.RawBytes()...) - if _, err = tr.rawBytes.Write(buf); err != nil { - return nil, err + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil, tr.err + } } + break loop // This is a file, so stop } - hdr.Name = cString(realname) - return hdr, err - case TypeGNULongLink: - // We have a GNU long link header. - realname, err := ioutil.ReadAll(tr) - if err != nil { - return nil, err - } - var buf []byte - if tr.RawAccounting { - if _, err = tr.rawBytes.Write(realname); err != nil { - return nil, err - } - buf = make([]byte, tr.rawBytes.Len()) - copy(buf[:], tr.RawBytes()) - } - hdr, err := tr.Next() - // since the above call to Next() resets the buffer, we need to throw the bytes over - if tr.RawAccounting { - buf = append(buf, tr.RawBytes()...) - if _, err = tr.rawBytes.Write(buf); err != nil { - return nil, err - } - } - hdr.Linkname = cString(realname) - return hdr, err } - return hdr, tr.err + return hdr, nil } // checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then @@ -385,6 +403,7 @@ func parsePAX(r io.Reader) (map[string]string, error) { return nil, err } } + sbuf := string(buf) // For GNU PAX sparse format 0.0 support. // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. @@ -393,35 +412,17 @@ func parsePAX(r io.Reader) (map[string]string, error) { headers := make(map[string]string) // Each record is constructed as // "%d %s=%s\n", length, keyword, value - for len(buf) > 0 { - // or the header was empty to start with. - var sp int - // The size field ends at the first space. - sp = bytes.IndexByte(buf, ' ') - if sp == -1 { + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { return nil, ErrHeader } - // Parse the first token as a decimal integer. 
- n, err := strconv.ParseInt(string(buf[:sp]), 10, 0) - if err != nil || n < 5 || int64(len(buf)) < n { - return nil, ErrHeader - } - // Extract everything between the decimal and the n -1 on the - // beginning to eat the ' ', -1 on the end to skip the newline. - var record []byte - record, buf = buf[sp+1:n-1], buf[n:] - // The first equals is guaranteed to mark the end of the key. - // Everything else is value. - eq := bytes.IndexByte(record, '=') - if eq == -1 { - return nil, ErrHeader - } - key, value := record[:eq], record[eq+1:] + sbuf = residual keyStr := string(key) if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. - sparseMap.Write(value) + sparseMap.WriteString(value) sparseMap.Write([]byte{','}) } else { // Normal key. Set the value in the headers map. @@ -436,9 +437,42 @@ func parsePAX(r io.Reader) (map[string]string, error) { return headers, nil } -// cString parses bytes as a NUL-terminated C-style string. +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +// +// A PAX record is of the following form: +// "%d %s=%s\n" % (size, key, value) +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + return rec[:eq], rec[eq+1:], rem, nil +} + +// parseString parses bytes as a NUL-terminated C-style string. // If a NUL byte is not found then the whole slice is returned as a string. -func cString(b []byte) string { +func (*parser) parseString(b []byte) string { n := 0 for n < len(b) && b[n] != 0 { n++ @@ -446,19 +480,51 @@ func cString(b []byte) string { return string(b[0:n]) } -func (tr *Reader) octal(b []byte) int64 { - // Check for binary format first. +// parseNumeric parses the input as being encoded in either base-256 or octal. +// This function may return negative numbers. +// If parsing fails or an integer overflow occurs, err will be set. +func (p *parser) parseNumeric(b []byte) int64 { + // Check for base-256 (binary) format first. + // If the first bit is set, then all following bits constitute a two's + // complement encoded number in big-endian byte order. if len(b) > 0 && b[0]&0x80 != 0 { - var x int64 - for i, c := range b { - if i == 0 { - c &= 0x7f // ignore signal bit in first byte - } - x = x<<8 | int64(c) + // Handling negative numbers relies on the following identity: + // -a-1 == ^a + // + // If the number is negative, we use an inversion mask to invert the + // data bytes and treat the value as an unsigned number. 
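// (Worked example, annotation only: for input bytes {0xff, 0xff, 0x9c}, the
// format bit b[0]&0x80 and the sign bit b[0]&0x40 are both set, so inv is
// 0xff. XOR-ing with inv yields {0x00, 0x00, 0x63}, the first byte's signal
// bit is masked off, and x accumulates to 99, so the result is
// ^int64(99) == -100 -- the two's complement reading of the payload bits.)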
+ var inv byte // 0x00 if positive or zero, 0xff if negative + if b[0]&0x40 != 0 { + inv = 0xff } - return x + + var x uint64 + for i, c := range b { + c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing + if i == 0 { + c &= 0x7f // Ignore signal bit in first byte + } + if (x >> 56) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + x = x<<8 | uint64(c) + } + if (x >> 63) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + if inv == 0xff { + return ^int64(x) + } + return int64(x) } + // Normal case is base-8 (octal) format. + return p.parseOctal(b) +} + +func (p *parser) parseOctal(b []byte) int64 { // Because unused fields are filled with NULs, we need // to skip leading NULs. Fields may also be padded with // spaces or NULs. @@ -469,27 +535,55 @@ func (tr *Reader) octal(b []byte) int64 { if len(b) == 0 { return 0 } - x, err := strconv.ParseUint(cString(b), 8, 64) - if err != nil { - tr.err = err + x, perr := strconv.ParseUint(p.parseString(b), 8, 64) + if perr != nil { + p.err = ErrHeader } return int64(x) } -// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding. -func (tr *Reader) skipUnread() { - nr := tr.numBytes() + tr.pad // number of bytes to skip +// skipUnread skips any unread bytes in the existing file entry, as well as any +// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is +// encountered in the data portion; it is okay to hit io.EOF in the padding. +// +// Note that this function still works properly even when sparse files are being +// used since numBytes returns the bytes remaining in the underlying io.Reader. +func (tr *Reader) skipUnread() error { + dataSkip := tr.numBytes() // Number of data bytes to skip + totalSkip := dataSkip + tr.pad // Total number of bytes to skip tr.curr, tr.pad = nil, 0 if tr.RawAccounting { - _, tr.err = io.CopyN(tr.rawBytes, tr.r, nr) - return + _, tr.err = io.CopyN(tr.rawBytes, tr.r, totalSkip) + return tr.err } - if sr, ok := tr.r.(io.Seeker); ok { - if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil { - return + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the tar stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, os.SEEK_CUR) + if err == nil { + // Seek seems supported, so perform the real Seek. 
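// (Annotation, not part of the upstream patch: seeking dataSkip-1 rather than
// dataSkip leaves at least one data byte for the io.CopyN below, so a
// truncated archive still produces a real read error; e.g. with dataSkip =
// 1024 the Seek advances 1023 bytes and CopyN must still copy 1 byte plus
// any block padding.)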
+ pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR) + if err != nil { + tr.err = err + return tr.err + } + seekSkipped = pos2 - pos1 } } - _, tr.err = io.CopyN(ioutil.Discard, tr.r, nr) + + var copySkipped int64 // Number of bytes skipped via CopyN + copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) + if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip { + tr.err = io.ErrUnexpectedEOF + } + return tr.err } func (tr *Reader) verifyChecksum(header []byte) bool { @@ -497,11 +591,19 @@ func (tr *Reader) verifyChecksum(header []byte) bool { return false } - given := tr.octal(header[148:156]) + var p parser + given := p.parseOctal(header[148:156]) unsigned, signed := checksum(header) - return given == unsigned || given == signed + return p.err == nil && (given == unsigned || given == signed) } +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. +// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. func (tr *Reader) readHeader() *Header { header := tr.hdrBuff[:] copy(header, zeroBlock) @@ -513,7 +615,7 @@ func (tr *Reader) readHeader() *Header { return nil } } - return nil + return nil // io.EOF is okay here } if tr.RawAccounting { if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { @@ -530,7 +632,7 @@ func (tr *Reader) readHeader() *Header { return nil } } - return nil + return nil // io.EOF is okay here } if tr.RawAccounting { if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { @@ -551,22 +653,19 @@ func (tr *Reader) readHeader() *Header { } // Unpack + var p parser hdr := new(Header) s := slicer(header) - hdr.Name = cString(s.next(100)) - hdr.Mode = tr.octal(s.next(8)) - hdr.Uid = int(tr.octal(s.next(8))) - hdr.Gid = int(tr.octal(s.next(8))) - hdr.Size = tr.octal(s.next(12)) - if hdr.Size < 0 { - tr.err = ErrHeader - return nil - } - hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0) + hdr.Name = p.parseString(s.next(100)) + hdr.Mode = p.parseNumeric(s.next(8)) + hdr.Uid = int(p.parseNumeric(s.next(8))) + hdr.Gid = int(p.parseNumeric(s.next(8))) + hdr.Size = p.parseNumeric(s.next(12)) + hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0) s.next(8) // chksum hdr.Typeflag = s.next(1)[0] - hdr.Linkname = cString(s.next(100)) + hdr.Linkname = p.parseString(s.next(100)) // The remainder of the header depends on the value of magic. 
// The original (v7) version of tar had no explicit magic field, @@ -586,70 +685,76 @@ func (tr *Reader) readHeader() *Header { switch format { case "posix", "gnu", "star": - hdr.Uname = cString(s.next(32)) - hdr.Gname = cString(s.next(32)) + hdr.Uname = p.parseString(s.next(32)) + hdr.Gname = p.parseString(s.next(32)) devmajor := s.next(8) devminor := s.next(8) if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { - hdr.Devmajor = tr.octal(devmajor) - hdr.Devminor = tr.octal(devminor) + hdr.Devmajor = p.parseNumeric(devmajor) + hdr.Devminor = p.parseNumeric(devminor) } var prefix string switch format { case "posix", "gnu": - prefix = cString(s.next(155)) + prefix = p.parseString(s.next(155)) case "star": - prefix = cString(s.next(131)) - hdr.AccessTime = time.Unix(tr.octal(s.next(12)), 0) - hdr.ChangeTime = time.Unix(tr.octal(s.next(12)), 0) + prefix = p.parseString(s.next(131)) + hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0) } if len(prefix) > 0 { hdr.Name = prefix + "/" + hdr.Name } } - if tr.err != nil { + if p.err != nil { + tr.err = p.err + return nil + } + + nb := hdr.Size + if isHeaderOnlyType(hdr.Typeflag) { + nb = 0 + } + if nb < 0 { tr.err = ErrHeader return nil } - // Maximum value of hdr.Size is 64 GB (12 octal digits), - // so there's no risk of int64 overflowing. - nb := int64(hdr.Size) - tr.pad = -nb & (blockSize - 1) // blockSize is a power of two - // Set the current file reader. + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two tr.curr = ®FileReader{r: tr.r, nb: nb} // Check for old GNU sparse format entry. if hdr.Typeflag == TypeGNUSparse { // Get the real size of the file. - hdr.Size = tr.octal(header[483:495]) + hdr.Size = p.parseNumeric(header[483:495]) + if p.err != nil { + tr.err = p.err + return nil + } // Read the sparse map. sp := tr.readOldGNUSparseMap(header) if tr.err != nil { return nil } + // Current file is a GNU sparse file. Update the current file reader. - tr.curr = &sparseFileReader{rfr: tr.curr.(*regFileReader), sp: sp, tot: hdr.Size} + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil + } } return hdr } -// A sparseEntry holds a single entry in a sparse file's sparse map. -// A sparse entry indicates the offset and size in a sparse file of a -// block of data. -type sparseEntry struct { - offset int64 - numBytes int64 -} - // readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. // The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, // then one or more extension headers are used to store the rest of the sparse map. 
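// (Annotation, not part of the upstream patch: in the old GNU format each map
// entry is a pair of 12-byte octal fields, offset then numBytes; an all-zero
// pair terminates the map early, and a nonzero isextended flag byte signals
// that another 512-byte extension block of entries follows, which is what the
// loop below keeps reading.)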
@@ -660,10 +765,10 @@ func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
 	// Read the four entries from the main tar header
 	for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
-		offset := tr.octal(s.next(oldGNUSparseOffsetSize))
-		numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
-		if tr.err != nil {
-			tr.err = ErrHeader
+		offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
+		numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
+		if p.err != nil {
+			tr.err = p.err
 			return nil
 		}
 		if offset == 0 && numBytes == 0 {
@@ -687,10 +792,10 @@ func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
 		isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
 		s = slicer(sparseHeader)
 		for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
-			offset := tr.octal(s.next(oldGNUSparseOffsetSize))
-			numBytes := tr.octal(s.next(oldGNUSparseNumBytesSize))
-			if tr.err != nil {
-				tr.err = ErrHeader
+			offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
+			numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
+			if p.err != nil {
+				tr.err = p.err
 				return nil
 			}
 			if offset == 0 && numBytes == 0 {
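For intuition about the sparseEntry pairs parsed above: each (offset, numBytes) entry says that the next numBytes of stored archive data land at offset in the expanded file; everything between entries is a hole that reads back as zeros, and the real size permits one trailing hole after the last entry. A toy expansion under the assumption of well-formed, in-order entries (not the vendored implementation, which streams instead of buffering):

// expandSparse lays stored fragments into a zero-filled buffer of the
// file's real size; e.g. data "abcde" with map [(0,2),(5,3)] and total 10
// yields "ab\x00\x00\x00cde\x00\x00".
func expandSparse(data []byte, sp []sparseEntry, total int64) []byte {
	out := make([]byte, total) // holes read as zeros
	for _, s := range sp {
		copy(out[s.offset:s.offset+s.numBytes], data[:s.numBytes])
		data = data[s.numBytes:]
	}
	return out
}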
@@ -702,134 +807,111 @@ func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
 	return sp
 }
 
-// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format version 1.0.
-// The sparse map is stored just before the file data and padded out to the nearest block boundary.
+// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
+// version 1.0. The format of the sparse map consists of a series of
+// newline-terminated numeric fields. The first field is the number of entries
+// and is always present. Following this are the entries, consisting of two
+// fields (offset, numBytes). This function must stop reading at the end
+// boundary of the block containing the last newline.
+//
+// Note that the GNU manual says that numeric values should be encoded in octal
+// format. However, the GNU tar utility itself outputs these values in decimal.
+// As such, this library treats values as being encoded in decimal.
 func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
-	buf := make([]byte, 2*blockSize)
-	sparseHeader := buf[:blockSize]
+	var cntNewline int64
+	var buf bytes.Buffer
+	var blk = make([]byte, blockSize)
 
-	// readDecimal is a helper function to read a decimal integer from the sparse map
-	// while making sure to read from the file in blocks of size blockSize
-	readDecimal := func() (int64, error) {
-		// Look for newline
-		nl := bytes.IndexByte(sparseHeader, '\n')
-		if nl == -1 {
-			if len(sparseHeader) >= blockSize {
-				// This is an error
-				return 0, ErrHeader
+	// feedTokens copies data in numBlock chunks from r into buf until there are
+	// at least cnt newlines in buf. It will not read more blocks than needed.
+	var feedTokens = func(cnt int64) error {
+		for cntNewline < cnt {
+			if _, err := io.ReadFull(r, blk); err != nil {
+				if err == io.EOF {
+					err = io.ErrUnexpectedEOF
+				}
+				return err
 			}
-			oldLen := len(sparseHeader)
-			newLen := oldLen + blockSize
-			if cap(sparseHeader) < newLen {
-				// There's more header, but we need to make room for the next block
-				copy(buf, sparseHeader)
-				sparseHeader = buf[:newLen]
-			} else {
-				// There's more header, and we can just reslice
-				sparseHeader = sparseHeader[:newLen]
-			}
-
-			// Now that sparseHeader is large enough, read next block
-			if _, err := io.ReadFull(r, sparseHeader[oldLen:newLen]); err != nil {
-				return 0, err
-			}
-			// leaving this function for io.Reader makes it more testable
-			if tr, ok := r.(*Reader); ok && tr.RawAccounting {
-				if _, err := tr.rawBytes.Write(sparseHeader[oldLen:newLen]); err != nil {
-					return 0, err
+			buf.Write(blk)
+			for _, c := range blk {
+				if c == '\n' {
+					cntNewline++
 				}
 			}
-
-			// Look for a newline in the new data
-			nl = bytes.IndexByte(sparseHeader[oldLen:newLen], '\n')
-			if nl == -1 {
-				// This is an error
-				return 0, ErrHeader
-			}
-			nl += oldLen // We want the position from the beginning
 		}
-
-		// Now that we've found a newline, read a number
-		n, err := strconv.ParseInt(string(sparseHeader[:nl]), 10, 0)
-		if err != nil {
-			return 0, ErrHeader
-		}
-
-		// Update sparseHeader to consume this number
-		sparseHeader = sparseHeader[nl+1:]
-		return n, nil
+		return nil
 	}
 
-	// Read the first block
-	if _, err := io.ReadFull(r, sparseHeader); err != nil {
+	// nextToken gets the next token delimited by a newline. This assumes that
+	// at least one newline exists in the buffer.
+	var nextToken = func() string {
+		cntNewline--
+		tok, _ := buf.ReadString('\n')
+		return tok[:len(tok)-1] // Cut off newline
+	}
+
+	// Parse for the number of entries.
+	// Use integer overflow resistant math to check this.
+	if err := feedTokens(1); err != nil {
 		return nil, err
 	}
-	// leaving this function for io.Reader makes it more testable
-	if tr, ok := r.(*Reader); ok && tr.RawAccounting {
-		if _, err := tr.rawBytes.Write(sparseHeader); err != nil {
-			return nil, err
-		}
+	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
+	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+		return nil, ErrHeader
 	}
 
-	// The first line contains the number of entries
-	numEntries, err := readDecimal()
-	if err != nil {
+	// Parse for all member entries.
+	// numEntries is trusted after this since a potential attacker must have
+	// committed resources proportional to what this library used.
+	if err := feedTokens(2 * numEntries); err != nil {
 		return nil, err
 	}
-
-	// Read all the entries
 	sp := make([]sparseEntry, 0, numEntries)
 	for i := int64(0); i < numEntries; i++ {
-		// Read the offset
-		offset, err := readDecimal()
+		offset, err := strconv.ParseInt(nextToken(), 10, 64)
 		if err != nil {
-			return nil, err
+			return nil, ErrHeader
 		}
-		// Read numBytes
-		numBytes, err := readDecimal()
+		numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
 		if err != nil {
-			return nil, err
+			return nil, ErrHeader
 		}
-
 		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
 	}
-
 	return sp, nil
 }
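The guard `int(2*numEntries) < int(numEntries)` above is worth a second look: numEntries is deliberately parsed as a native int so that the doubling used by feedTokens(2 * numEntries) is checked for wrap-around before any proportional work is done. A self-contained demonstration of the wrap-around being rejected (hypothetical values, forced to 32 bits for effect):

package main

import "fmt"

func main() {
	// On a 32-bit int, doubling 1<<30 wraps to a negative number, so the
	// "doubled count is smaller than the count" test catches it before the
	// reader commits memory or I/O proportional to the claimed entry count.
	n := int32(1 << 30)
	fmt.Println(2*n, 2*n < n) // -2147483648 true
}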
 
-// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format version 0.1.
-// The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(headers map[string]string) ([]sparseEntry, error) {
-	// Get number of entries
-	numEntriesStr, ok := headers[paxGNUSparseNumBlocks]
-	if !ok {
-		return nil, ErrHeader
-	}
-	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0)
-	if err != nil {
+// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
+// version 0.1. The sparse map is stored in the PAX headers.
+func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
+	// Get number of entries.
+	// Use integer overflow resistant math to check this.
+	numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
+	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
+	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
 		return nil, ErrHeader
 	}
 
-	sparseMap := strings.Split(headers[paxGNUSparseMap], ",")
-
-	// There should be two numbers in sparseMap for each entry
+	// There should be two numbers in sparseMap for each entry.
+	sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
 	if int64(len(sparseMap)) != 2*numEntries {
 		return nil, ErrHeader
 	}
 
-	// Loop through the entries in the sparse map
+	// Loop through the entries in the sparse map.
+	// numEntries is trusted now.
 	sp := make([]sparseEntry, 0, numEntries)
 	for i := int64(0); i < numEntries; i++ {
-		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 0)
+		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
 		if err != nil {
 			return nil, ErrHeader
 		}
-		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 0)
+		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
 		if err != nil {
 			return nil, ErrHeader
 		}
 		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
 	}
-
 	return sp, nil
 }
@@ -846,10 +928,18 @@ func (tr *Reader) numBytes() int64 {
 // Read reads from the current entry in the tar archive.
 // It returns 0, io.EOF when it reaches the end of that entry,
 // until Next is called to advance to the next entry.
+//
+// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
+// the Header.Size claims.
 func (tr *Reader) Read(b []byte) (n int, err error) {
+	if tr.err != nil {
+		return 0, tr.err
+	}
 	if tr.curr == nil {
 		return 0, io.EOF
 	}
+
 	n, err = tr.curr.Read(b)
 	if err != nil && err != io.EOF {
 		tr.err = err
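For reference, the two PAX records consumed here carry the entry count and a flat comma-separated list of offset/length pairs. Assuming the usual constant values (paxGNUSparseNumBlocks is "GNU.sparse.numblocks" and paxGNUSparseMap is "GNU.sparse.map"), a two-fragment map would be parsed like this (illustrative call):

extHdrs := map[string]string{
	"GNU.sparse.numblocks": "2",
	"GNU.sparse.map":       "0,2,5,3", // pairs: (offset 0, 2 bytes), (offset 5, 3 bytes)
}
sp, err := readGNUSparseMap0x1(extHdrs)
// sp == []sparseEntry{{offset: 0, numBytes: 2}, {offset: 5, numBytes: 3}}, err == nil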
@@ -879,9 +969,33 @@ func (rfr *regFileReader) numBytes() int64 {
 	return rfr.nb
 }
 
-// readHole reads a sparse file hole ending at offset toOffset
-func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
-	n64 := toOffset - sfr.pos
+// newSparseFileReader creates a new sparseFileReader, but validates all of the
+// sparse entries before doing so.
+func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
+	if total < 0 {
+		return nil, ErrHeader // Total size cannot be negative
+	}
+
+	// Validate all sparse entries. These are the same checks as performed by
+	// the BSD tar utility.
+	for i, s := range sp {
+		switch {
+		case s.offset < 0 || s.numBytes < 0:
+			return nil, ErrHeader // Negative values are never okay
+		case s.offset > math.MaxInt64-s.numBytes:
+			return nil, ErrHeader // Integer overflow with large length
+		case s.offset+s.numBytes > total:
+			return nil, ErrHeader // Region extends beyond the "real" size
+		case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
+			return nil, ErrHeader // Regions can't overlap and must be in order
+		}
+	}
+	return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
+}
+
+// readHole reads a sparse hole ending at endOffset.
+func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
+	n64 := endOffset - sfr.pos
 	if n64 > int64(len(b)) {
 		n64 = int64(len(b))
 	}
@@ -895,49 +1009,54 @@ func (sfr *sparseFileReader) readHole(b []byte, toOffset int64) int {
 
 // Read reads the sparse file data in expanded form.
 func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
-	if len(sfr.sp) == 0 {
-		// No more data fragments to read from.
-		if sfr.pos < sfr.tot {
-			// We're in the last hole
-			n = sfr.readHole(b, sfr.tot)
-			return
-		}
-		// Otherwise, we're at the end of the file
-		return 0, io.EOF
-	}
-	if sfr.tot < sfr.sp[0].offset {
-		return 0, io.ErrUnexpectedEOF
-	}
-	if sfr.pos < sfr.sp[0].offset {
-		// We're in a hole
-		n = sfr.readHole(b, sfr.sp[0].offset)
-		return
+	// Skip past all empty fragments.
+	for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
+		sfr.sp = sfr.sp[1:]
 	}
 
-	// We're not in a hole, so we'll read from the next data fragment
-	posInFragment := sfr.pos - sfr.sp[0].offset
-	bytesLeft := sfr.sp[0].numBytes - posInFragment
+	// If there are no more fragments, then it is possible that there
+	// is one last sparse hole.
+	if len(sfr.sp) == 0 {
+		// This behavior matches the BSD tar utility.
+		// However, GNU tar stops returning data even if sfr.total is unmet.
+		if sfr.pos < sfr.total {
+			return sfr.readHole(b, sfr.total), nil
+		}
+		return 0, io.EOF
+	}
+
+	// In front of a data fragment, so read a hole.
+	if sfr.pos < sfr.sp[0].offset {
+		return sfr.readHole(b, sfr.sp[0].offset), nil
+	}
+
+	// In a data fragment, so read from it.
+	// This math is overflow free since we verify that offset and numBytes can
+	// be safely added when creating the sparseFileReader.
+	endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
+	bytesLeft := endPos - sfr.pos                   // Bytes left in fragment
 	if int64(len(b)) > bytesLeft {
-		b = b[0:bytesLeft]
+		b = b[:bytesLeft]
 	}
 
 	n, err = sfr.rfr.Read(b)
 	sfr.pos += int64(n)
-
-	if int64(n) == bytesLeft {
-		// We're done with this fragment
-		sfr.sp = sfr.sp[1:]
+	if err == io.EOF {
+		if sfr.pos < endPos {
+			err = io.ErrUnexpectedEOF // There was supposed to be more data
+		} else if sfr.pos < sfr.total {
+			err = nil // There is still an implicit sparse hole at the end
+		}
 	}
 
-	if err == io.EOF && sfr.pos < sfr.tot {
-		// We reached the end of the last fragment's data, but there's a final hole
-		err = nil
+	if sfr.pos == endPos {
+		sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
 	}
-	return
+	return n, err
 }
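A few concrete cases for the validation switch above, with total = 10 (a sketch only; rfr stands in for any numBytesReader, and math.MaxInt64 assumes the math import this file already uses):

// Accepted: in order, non-overlapping, and within the real size.
_, err := newSparseFileReader(rfr, []sparseEntry{{0, 2}, {5, 3}}, 10)
// err == nil

// Rejected: the second region starts before the first one ends.
_, err = newSparseFileReader(rfr, []sparseEntry{{0, 4}, {3, 2}}, 10)
// err == ErrHeader

// Rejected: offset+numBytes would overflow int64.
_, err = newSparseFileReader(rfr, []sparseEntry{{math.MaxInt64, 1}}, 10)
// err == ErrHeader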
 
 // numBytes returns the number of bytes left to read in the sparse file's
 // sparse-encoded data in the tar archive.
 func (sfr *sparseFileReader) numBytes() int64 {
-	return sfr.rfr.nb
+	return sfr.rfr.numBytes()
 }
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
index 9dbc01a2..04263817 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
@@ -12,8 +12,8 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"os"
 	"path"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
@@ -23,7 +23,6 @@ var (
 	ErrWriteTooLong    = errors.New("archive/tar: write too long")
 	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
 	ErrWriteAfterClose = errors.New("archive/tar: write after close")
-	errNameTooLong     = errors.New("archive/tar: name too long")
 	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
 )
 
@@ -43,6 +42,10 @@ type Writer struct {
 	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
 }
 
+type formatter struct {
+	err error // Last error seen
+}
+
 // NewWriter creates a new Writer writing to w.
 func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
 
@@ -69,17 +72,9 @@ func (tw *Writer) Flush() error {
 }
 
 // Write s into b, terminating it with a NUL if there is room.
-// If the value is too long for the field and allowPax is true add a paxheader record instead
-func (tw *Writer) cString(b []byte, s string, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
-	needsPaxHeader := allowPax && len(s) > len(b) || !isASCII(s)
-	if needsPaxHeader {
-		paxHeaders[paxKeyword] = s
-		return
-	}
+func (f *formatter) formatString(b []byte, s string) {
 	if len(s) > len(b) {
-		if tw.err == nil {
-			tw.err = ErrFieldTooLong
-		}
+		f.err = ErrFieldTooLong
 		return
 	}
 	ascii := toASCII(s)
@@ -90,40 +85,40 @@
 }
 
 // Encode x as an octal ASCII string and write it into b with leading zeros.
-func (tw *Writer) octal(b []byte, x int64) {
+func (f *formatter) formatOctal(b []byte, x int64) {
 	s := strconv.FormatInt(x, 8)
 	// leading zeros, but leave room for a NUL.
 	for len(s)+1 < len(b) {
 		s = "0" + s
 	}
-	tw.cString(b, s, false, paxNone, nil)
+	f.formatString(b, s)
 }
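A worked example of where the base-256 path introduced just below kicks in: formatOctal must leave room for a trailing NUL, so a 12-byte size field caps out at 11 octal digits (077777777777 = 8589934591, just under 8 GiB). A 16 GiB entry therefore takes the binary form, in which the first byte's 0x80 bit flags base-256 and the rest is the big-endian value (illustrative values; not taken from the vendored tests):

var b [12]byte
var f formatter
f.formatNumeric(b[:], 1<<34) // 16 GiB does not fit in 11 octal digits
// b == [12]byte{0x80, 0, 0, 0, 0, 0, 0, 0x04, 0, 0, 0, 0}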
 
-// Write x into b, either as octal or as binary (GNUtar/star extension).
-// If the value is too long for the field and writingPax is enabled both for the field and the add a paxheader record instead
-func (tw *Writer) numeric(b []byte, x int64, allowPax bool, paxKeyword string, paxHeaders map[string]string) {
-	// Try octal first.
-	s := strconv.FormatInt(x, 8)
-	if len(s) < len(b) {
-		tw.octal(b, x)
+// fitsInBase256 reports whether x can be encoded into n bytes using base-256
+// encoding. Unlike octal encoding, base-256 encoding does not require that the
+// string ends with a NUL character. Thus, all n bytes are available for output.
+//
+// If operating in binary mode, this assumes strict GNU binary mode; which means
+// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
+func fitsInBase256(n int, x int64) bool {
+	var binBits = uint(n-1) * 8
+	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
+
+// Write x into b, as binary (GNUtar/star extension).
+func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInBase256(len(b), x) {
+		for i := len(b) - 1; i >= 0; i-- {
+			b[i] = byte(x)
+			x >>= 8
+		}
+		b[0] |= 0x80 // Highest bit indicates binary format
 		return
 	}
 
-	// If it is too long for octal, and pax is preferred, use a pax header
-	if allowPax && tw.preferPax {
-		tw.octal(b, 0)
-		s := strconv.FormatInt(x, 10)
-		paxHeaders[paxKeyword] = s
-		return
-	}
-
-	// Too big: use binary (big-endian).
-	tw.usedBinary = true
-	for i := len(b) - 1; x > 0 && i >= 0; i-- {
-		b[i] = byte(x)
-		x >>= 8
-	}
-	b[0] |= 0x80 // highest bit indicates binary format
+	f.formatOctal(b, 0) // Last resort, just write zero
+	f.err = ErrFieldTooLong
 }
 
 var (
@@ -162,6 +157,7 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 	// subsecond time resolution, but for now let's just capture
 	// too long fields or non ascii characters
 
+	var f formatter
 	var header []byte
 
 	// We need to select which scratch buffer to use carefully,
@@ -176,10 +172,40 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 	copy(header, zeroBlock)
 	s := slicer(header)
 
+	// Wrappers around formatter that automatically sets paxHeaders if the
+	// argument extends beyond the capacity of the input byte slice.
+	var formatString = func(b []byte, s string, paxKeyword string) {
+		needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
+		if needsPaxHeader {
+			paxHeaders[paxKeyword] = s
+			return
+		}
+		f.formatString(b, s)
+	}
+	var formatNumeric = func(b []byte, x int64, paxKeyword string) {
+		// Try octal first.
+		s := strconv.FormatInt(x, 8)
+		if len(s) < len(b) {
+			f.formatOctal(b, x)
+			return
+		}
+
+		// If it is too long for octal, and PAX is preferred, use a PAX header.
+		if paxKeyword != paxNone && tw.preferPax {
+			f.formatOctal(b, 0)
+			s := strconv.FormatInt(x, 10)
+			paxHeaders[paxKeyword] = s
+			return
+		}
+
+		tw.usedBinary = true
+		f.formatNumeric(b, x)
+	}
+
 	// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
 	pathHeaderBytes := s.next(fileNameSize)
 
-	tw.cString(pathHeaderBytes, hdr.Name, true, paxPath, paxHeaders)
+	formatString(pathHeaderBytes, hdr.Name, paxPath)
 
 	// Handle out of range ModTime carefully.
 	var modTime int64
@@ -187,25 +213,25 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 		modTime = hdr.ModTime.Unix()
 	}
 
-	tw.octal(s.next(8), hdr.Mode)                                   // 100:108
-	tw.numeric(s.next(8), int64(hdr.Uid), true, paxUid, paxHeaders) // 108:116
-	tw.numeric(s.next(8), int64(hdr.Gid), true, paxGid, paxHeaders) // 116:124
-	tw.numeric(s.next(12), hdr.Size, true, paxSize, paxHeaders)     // 124:136
-	tw.numeric(s.next(12), modTime, false, paxNone, nil)            // 136:148 --- consider using pax for finer granularity
-	s.next(8)                                                       // chksum (148:156)
-	s.next(1)[0] = hdr.Typeflag                                     // 156:157
+	f.formatOctal(s.next(8), hdr.Mode)               // 100:108
+	formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
+	formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
+	formatNumeric(s.next(12), hdr.Size, paxSize)     // 124:136
+	formatNumeric(s.next(12), modTime, paxNone)      // 136:148 --- consider using pax for finer granularity
+	s.next(8)                                        // chksum (148:156)
+	s.next(1)[0] = hdr.Typeflag                      // 156:157
 
-	tw.cString(s.next(100), hdr.Linkname, true, paxLinkpath, paxHeaders)
+	formatString(s.next(100), hdr.Linkname, paxLinkpath)
 
-	copy(s.next(8), []byte("ustar\x0000"))                        // 257:265
-	tw.cString(s.next(32), hdr.Uname, true, paxUname, paxHeaders) // 265:297
-	tw.cString(s.next(32), hdr.Gname, true, paxGname, paxHeaders) // 297:329
-	tw.numeric(s.next(8), hdr.Devmajor, false, paxNone, nil)      // 329:337
-	tw.numeric(s.next(8), hdr.Devminor, false, paxNone, nil)      // 337:345
+	copy(s.next(8), []byte("ustar\x0000"))          // 257:265
+	formatString(s.next(32), hdr.Uname, paxUname)   // 265:297
+	formatString(s.next(32), hdr.Gname, paxGname)   // 297:329
+	formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
+	formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
 
 	// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
 	prefixHeaderBytes := s.next(155)
-	tw.cString(prefixHeaderBytes, "", false, paxNone, nil) // 345:500 prefix
+	formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
 
 	// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
 	if tw.usedBinary {
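The offsets annotated above imply hard octal capacities, which is why uid/gid/size/mtime go through the formatNumeric wrapper rather than straight formatOctal: an 8-byte field holds at most 7 octal digits plus the NUL (max 07777777 = 2097151), and a 12-byte field at most 11 digits (max 077777777777 = 8589934591). Beyond that, the value becomes a PAX record when one is allowed and preferred, or base-256 otherwise. An illustrative check mirroring the wrapper's first test:

fmt.Println(len(strconv.FormatInt(2097151, 8)) < 8) // true: still fits an 8-byte octal field
fmt.Println(len(strconv.FormatInt(2097152, 8)) < 8) // false: needs a PAX record or base-256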
@@ -215,37 +241,26 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 	_, paxPathUsed := paxHeaders[paxPath]
 	// try to use a ustar header when only the name is too long
 	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
-		suffix := hdr.Name
-		prefix := ""
-		if len(hdr.Name) > fileNameSize && isASCII(hdr.Name) {
-			var err error
-			prefix, suffix, err = tw.splitUSTARLongName(hdr.Name)
-			if err == nil {
-				// ok we can use a ustar long name instead of pax, now correct the fields
+		prefix, suffix, ok := splitUSTARPath(hdr.Name)
+		if ok {
+			// Since we can encode in USTAR format, disable PAX header.
+			delete(paxHeaders, paxPath)
 
-				// remove the path field from the pax header. this will suppress the pax header
-				delete(paxHeaders, paxPath)
-
-				// update the path fields
-				tw.cString(pathHeaderBytes, suffix, false, paxNone, nil)
-				tw.cString(prefixHeaderBytes, prefix, false, paxNone, nil)
-
-				// Use the ustar magic if we used ustar long names.
-				if len(prefix) > 0 && !tw.usedBinary {
-					copy(header[257:265], []byte("ustar\x00"))
-				}
-			}
+			// Update the path fields
+			formatString(pathHeaderBytes, suffix, paxNone)
+			formatString(prefixHeaderBytes, prefix, paxNone)
 		}
 	}
 
 	// The chksum field is terminated by a NUL and a space.
 	// This is different from the other octal fields.
 	chksum, _ := checksum(header)
-	tw.octal(header[148:155], chksum)
+	f.formatOctal(header[148:155], chksum) // Never fails
 	header[155] = ' '
 
-	if tw.err != nil {
-		// problem with header; probably integer too big for a field.
+	// Check if there were any formatting errors.
+	if f.err != nil {
+		tw.err = f.err
 		return tw.err
 	}
@@ -270,28 +285,25 @@ func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
 	return tw.err
 }
 
-// writeUSTARLongName splits a USTAR long name hdr.Name.
-// name must be < 256 characters. errNameTooLong is returned
-// if hdr.Name can't be split. The splitting heuristic
-// is compatible with gnu tar.
-func (tw *Writer) splitUSTARLongName(name string) (prefix, suffix string, err error) {
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
 	length := len(name)
-	if length > fileNamePrefixSize+1 {
+	if length <= fileNameSize || !isASCII(name) {
+		return "", "", false
+	} else if length > fileNamePrefixSize+1 {
 		length = fileNamePrefixSize + 1
 	} else if name[length-1] == '/' {
 		length--
 	}
+
 	i := strings.LastIndex(name[:length], "/")
-	// nlen contains the resulting length in the name field.
-	// plen contains the resulting length in the prefix field.
-	nlen := len(name) - i - 1
-	plen := i
+	nlen := len(name) - i - 1 // nlen is length of suffix
+	plen := i                 // plen is length of prefix
 	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
-		err = errNameTooLong
-		return
+		return "", "", false
 	}
-	prefix, suffix = name[:i], name[i+1:]
-	return
+	return name[:i], name[i+1:], true
 }
 
 // writePaxHeader writes an extended pax header to the
@@ -304,11 +316,11 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
 	// succeed, and seems harmless enough.
 	ext.ModTime = hdr.ModTime
 	// The spec asks that we namespace our pseudo files
-	// with the current pid.
-	pid := os.Getpid()
+	// with the current pid. However, this results in differing outputs
+	// for identical inputs. As such, the constant 0 is now used instead.
+	// golang.org/issue/12358
 	dir, file := path.Split(hdr.Name)
-	fullName := path.Join(dir,
-		fmt.Sprintf("PaxHeaders.%d", pid), file)
+	fullName := path.Join(dir, "PaxHeaders.0", file)
 
 	ascii := toASCII(fullName)
 	if len(ascii) > 100 {
@@ -318,8 +330,15 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
 
 	// Construct the body
 	var buf bytes.Buffer
 
-	for k, v := range paxHeaders {
-		fmt.Fprint(&buf, paxHeader(k+"="+v))
+	// Keys are sorted before writing to body to allow deterministic output.
+	var keys []string
+	for k := range paxHeaders {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
 	}
 
 	ext.Size = int64(len(buf.Bytes()))
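Usage of the rewritten splitUSTARPath: for an over-long ASCII path it looks for the last '/' within the first 156 bytes and succeeds only when that yields a 1-100 byte suffix and a prefix of at most 155 bytes; short or non-ASCII names report false, so the caller keeps the PAX header instead (illustrative calls):

long := strings.Repeat("a/", 70) + "name" // 144 ASCII bytes, too long for the 100-byte name field
prefix, suffix, ok := splitUSTARPath(long)
// ok == true; suffix == "name" goes into the name field, prefix into the 155-byte prefix field

_, _, ok = splitUSTARPath("short.txt")
// ok == false: it already fits, so no split is attempted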
@@ -335,17 +354,18 @@
 	return nil
 }
 
-// paxHeader formats a single pax record, prefixing it with the appropriate length
-func paxHeader(msg string) string {
-	const padding = 2 // Extra padding for space and newline
-	size := len(msg) + padding
+// formatPAXRecord formats a single PAX record, prefixing it with the
+// appropriate length.
+func formatPAXRecord(k, v string) string {
+	const padding = 3 // Extra padding for ' ', '=', and '\n'
+	size := len(k) + len(v) + padding
 	size += len(strconv.Itoa(size))
-	record := fmt.Sprintf("%d %s\n", size, msg)
+	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
+
+	// Final adjustment if adding size field increased the record size.
 	if len(record) != size {
-		// Final adjustment if adding size increased
-		// the number of digits in size
 		size = len(record)
-		record = fmt.Sprintf("%d %s\n", size, msg)
+		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
 	}
 	return record
 }
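A worked example of the self-sizing arithmetic: for k="mtime" and v="1350244992.023960108", len(k)+len(v)+3 is 28, prefixing the two-digit length gives size 30, and the rendered record is exactly 30 bytes, so no adjustment pass is needed. The re-render branch only fires when prepending the length itself grows the digit count (for instance, a 98-byte body yields size 100, but the three-digit prefix makes the record 101 bytes, so it is re-rendered once with size 101). Illustrative call:

fmt.Print(formatPAXRecord("mtime", "1350244992.023960108"))
// Output: "30 mtime=1350244992.023960108\n"
// 30 bytes in total, counting the length prefix itself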