diff --git a/cmd/power/power.go b/cmd/power/power.go index fa2d3069..ab849e01 100644 --- a/cmd/power/power.go +++ b/cmd/power/power.go @@ -42,7 +42,9 @@ func runDocker(name string) error { existing, err := client.ContainerInspect(context.Background(), name) if err == nil && existing.ID != "" { - err := client.ContainerRemove(context.Background(), existing.ID, types.ContainerRemoveOptions{}) + err := client.ContainerRemove(context.Background(), types.ContainerRemoveOptions{ + ContainerID: existing.ID, + }) if err != nil { return err @@ -79,10 +81,11 @@ func runDocker(name string) error { } go func() { - client.ContainerAttach(context.Background(), powerContainer.ID, types.ContainerAttachOptions{ - Stream: true, - Stderr: true, - Stdout: true, + client.ContainerAttach(context.Background(), types.ContainerAttachOptions{ + ContainerID: powerContainer.ID, + Stream: true, + Stderr: true, + Stdout: true, }) }() diff --git a/cmd/userdocker/main.go b/cmd/userdocker/main.go index a0cd9932..d56977a5 100644 --- a/cmd/userdocker/main.go +++ b/cmd/userdocker/main.go @@ -93,7 +93,8 @@ func startDocker(cfg *config.CloudConfig) error { cmd = append(cmd, args...) log.Infof("Running %v", cmd) - resp, err := client.ContainerExecCreate(context.Background(), storageContext, types.ExecConfig{ + resp, err := client.ContainerExecCreate(context.Background(), types.ExecConfig{ + Container: storageContext, Privileged: true, AttachStdin: true, AttachStderr: true, diff --git a/trash.conf b/trash.conf new file mode 100644 index 00000000..06af77f1 --- /dev/null +++ b/trash.conf @@ -0,0 +1,54 @@ +github.com/Microsoft/go-winio v0.1.0 +github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837 +github.com/Sirupsen/logrus v0.9.0 +github.com/boltdb/bolt v1.2.0 +github.com/cloudfoundry-incubator/candiedyaml 01cbc92901719f599b11f3a7e3b1768d7002b0bb https://github.com/rancher/candiedyaml +github.com/cloudfoundry/gosigar 3ed7c74352dae6dc00bdc8c74045375352e3ec05 +github.com/codegangsta/cli 95199f812193f6f1e8bbe0a916d9f3ed50729909 https://github.com/ibuildthecloud/cli-1.git +github.com/coreos/coreos-cloudinit dede20ddbc6a3fea5cada4dc3bfa2ebed1a6cbbc https://github.com/rancher/coreos-cloudinit.git +github.com/coreos/go-systemd v4 +github.com/coreos/yaml 6b16a5714269b2f70720a45406b1babd947a17ef +github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d +github.com/docker/containerd ebb6d97f443fdcbf7084e41356359c99421d93f5 https://github.com/ibuildthecloud/containerd.git +github.com/docker/distribution 467fc068d88aa6610691b7f1a677271a3fac4aac +github.com/docker/docker system-docker-1.11.1 https://github.com/ibuildthecloud/docker.git +github.com/docker/engine-api v0.3.3 +github.com/docker/go-connections v0.2.0 +github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 +github.com/docker/libcompose 1d8297b120a32a5173cb5108b24ecab5ab14f9f1 https://github.com/rancher/libcompose.git +github.com/docker/libnetwork v0.5.6 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/docker/machine 4a8e93ac9bc2ced1c3bc4a43c03fdaa1c2749205 +github.com/flynn/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff +github.com/godbus/dbus v3 +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/context 14f550f51a +github.com/gorilla/mux e444e69cbd +github.com/imdario/mergo 0.2.1 +github.com/j-keck/arping 4f4d2c8983a18e2c9c63a3f339bc9a998c4557bc +github.com/kless/term d57d9d2d5be197e12d9dee142d855470d83ce62f +github.com/mattn/go-shellwords v1.0.0 
+github.com/opencontainers/runc edc34c4a8c1e261b5ce926ff557ecde1aff19ce3 https://github.com/ibuildthecloud/runc.git +github.com/opencontainers/runtime-spec f955d90e70a98ddfb886bd930ffd076da9b67998 +github.com/opencontainers/specs f955d90e70a98ddfb886bd930ffd076da9b67998 +github.com/packethost/packngo 92012705236896736875186c9e49557897c6af90 https://github.com/ibuildthecloud/packngo.git +github.com/pmezard/go-difflib d8ed2627bdf02c080bf22230dbb337003b7aba2d +github.com/rancher/docker-from-scratch v1.10.3 +github.com/rancher/netconf 7880fdeac0923a05b86a0b5774b4dc96a5749d76 +github.com/rcrowley/go-metrics eeba7bd0dd01ace6e690fa833b3f22aaec29af43 +github.com/ryanuber/go-glob 0067a9abd927e50aed5190662702f81231413ae0 +github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1 +github.com/stretchr/testify a1f97990ddc16022ec7610326dd9bce31332c116 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/tchap/go-patricia v2.1.0 +github.com/vbatts/tar-split v0.9.11 +github.com/vishvananda/netlink 631962935bff4f3d20ff32a72e8944f6d2836a26 +github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a +github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 +github.com/xeipuuv/gojsonschema ac452913faa25c08bb78810d3e6f88b8a39f8f25 +golang.org/x/crypto 2f3083f6163ef51179ad42ed523a18c9a1141467 +golang.org/x/net 991d3e32f76f19ee6d9caadb3a22eae8d23315f7 https://github.com/golang/net.git +golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git +google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https://code.googlesource.com/gocloud +google.golang.org/grpc ab0be5212fb225475f2087566eded7da5d727960 https://github.com/grpc/grpc-go.git +gopkg.in/fsnotify.v1 v1.2.0 diff --git a/trash.yml b/trash.yml deleted file mode 100644 index 54d82f43..00000000 --- a/trash.yml +++ /dev/null @@ -1,117 +0,0 @@ -package: github.com/rancher/os - -import: -- package: github.com/Sirupsen/logrus - version: v0.9.0 - -- package: github.com/cloudfoundry-incubator/candiedyaml - version: 01cbc92901719f599b11f3a7e3b1768d7002b0bb - repo: https://github.com/rancher/candiedyaml - -- package: github.com/codegangsta/cli - version: 839f07bfe4819fa1434fa907d0804ce6ec45a5df - -- package: github.com/coreos/coreos-cloudinit - version: 65031e1ab2d3574544d26f5b5d7ddddd0032fd00 - repo: https://github.com/rancher/coreos-cloudinit.git - -- package: github.com/coreos/go-systemd - version: 4fbc5060a317b142e6c7bfbedb65596d5f0ab99b - -- package: github.com/coreos/yaml - version: 6b16a5714269b2f70720a45406b1babd947a17ef - -- package: github.com/docker/distribution - version: 467fc068d88aa6610691b7f1a677271a3fac4aac - -- package: github.com/docker/docker - version: 9837ec4da53f15f9120d53a6e1517491ba8b0261 - -- package: github.com/docker/engine-api - version: fd7f99d354831e7e809386087e7ec3129fdb1520 - -- package: github.com/docker/go-connections - version: v0.2.0 - -- package: github.com/docker/go-units - version: 651fc226e7441360384da338d0fd37f2440ffbe3 - -- package: github.com/docker/libcompose - version: b4407312d861c21e0290d088deef15c2f8c8aa74 - repo: https://github.com/rancher/libcompose - -- package: github.com/docker/libcontainer - version: 83a102cc68a09d890cce3b6c2e5c14c49e6373a0 - -- package: github.com/docker/libnetwork - version: v0.5.6 - -- package: github.com/docker/machine - version: 4a8e93ac9bc2ced1c3bc4a43c03fdaa1c2749205 - -- package: github.com/flynn/go-shlex - version: 3f9db97f856818214da2e1057f8ad84803971cff - -- 
package: github.com/gorilla/context - version: 14f550f51af52180c2eefed15e5fd18d63c0a64a - -- package: github.com/gorilla/mux - version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf - -- package: github.com/guelfey/go.dbus - version: f6a3a2366cc39b8479cadc499d3c735fb10fbdda - -- package: github.com/j-keck/arping - version: 4f4d2c8983a18e2c9c63a3f339bc9a998c4557bc - -- package: github.com/kless/term - version: d57d9d2d5be197e12d9dee142d855470d83ce62f - -- package: github.com/opencontainers/runc - version: 2441732d6fcc0fb0a542671a4372e0c7bc99c19e - -- package: github.com/rancher/docker-from-scratch - version: v1.10.3 - -- package: github.com/rancher/netconf - version: 7880fdeac0923a05b86a0b5774b4dc96a5749d76 - -- package: github.com/ryanuber/go-glob - version: 0067a9abd927e50aed5190662702f81231413ae0 - -- package: github.com/stretchr/testify - version: a1f97990ddc16022ec7610326dd9bce31332c116 - -- package: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - -- package: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - -- package: github.com/vishvananda/netlink - version: edcd99c0881a4de0fdb3818af6b24f4ee6948464 - -- package: golang.org/x/crypto - version: 2f3083f6163ef51179ad42ed523a18c9a1141467 - -- package: golang.org/x/net - version: 84ba27dd5b2d8135e9da1395277f2c9333a2ffda - -- package: google.golang.org/cloud - version: c97f5f9979a8582f3ab72873a51979619801248b - -- package: github.com/packethost/packngo - version: 92012705236896736875186c9e49557897c6af90 - repo: https://github.com/ibuildthecloud/packngo.git - -- package: github.com/vbatts/tar-split - version: v0.9.11 - -- package: github.com/xeipuuv/gojsonpointer - version: e0fe6f68307607d540ed8eac07a342c33fa1b54a - -- package: github.com/xeipuuv/gojsonreference - version: e02fc20de94c78484cd5ffb007f8af96be030a45 - -- package: github.com/xeipuuv/gojsonschema - version: ac452913faa25c08bb78810d3e6f88b8a39f8f25 diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 00000000..b8b569d7 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
new file mode 100644
index 00000000..478862a8
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -0,0 +1,15 @@
+# go-winio
+
+This repository contains utilities for efficiently performing Win32 IO operations in
+Go. Currently, this is focused on accessing named pipes and other file handles, and
+for using named pipes as a net transport.
+
+This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
+to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
+newer operating systems. This is similar to the implementation of network sockets in Go's net
+package.
+
+Please see the LICENSE file for licensing information.
+
+Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
+for another named pipe implementation.
diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go
new file mode 100644
index 00000000..bfefd42c
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backup.go
@@ -0,0 +1,241 @@
+package winio
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"syscall"
+	"unicode/utf16"
+)
+
+//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+
+const (
+	BackupData = uint32(iota + 1)
+	BackupEaData
+	BackupSecurity
+	BackupAlternateData
+	BackupLink
+	BackupPropertyData
+	BackupObjectId
+	BackupReparseData
+	BackupSparseBlock
+	BackupTxfsData
+
+	StreamSparseAttributes = uint32(8)
+)
+
+// BackupHeader represents a backup stream of a file.
+type BackupHeader struct {
+	Id         uint32 // The backup stream ID
+	Attributes uint32 // Stream attributes
+	Size       int64  // The size of the stream in bytes
+	Name       string // The name of the stream (for BackupAlternateData only).
+	Offset     int64  // The offset of the stream in the file (for BackupSparseBlock only).
+}
+
+type win32StreamId struct {
+	StreamId   uint32
+	Attributes uint32
+	Size       uint64
+	NameSize   uint32
+}
+
+// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
+// of BackupHeader values.
+type BackupStreamReader struct {
+	r         io.Reader
+	bytesLeft int64
+}
+
+// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
+	return &BackupStreamReader{r, 0}
+}
+
+// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
+// it was not completely read.
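+//
+// For example, a minimal iteration sketch (assuming r is an io.Reader carrying
+// raw BackupRead output; error handling abbreviated):
+//
+//	br := winio.NewBackupStreamReader(r)
+//	for {
+//		hdr, err := br.Next()
+//		if err == io.EOF {
+//			break // no more streams
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// hdr.Id identifies the stream type (BackupData, BackupAlternateData, ...);
+//		// the payload of the current stream is then read from br itself.
+//		if _, err := io.Copy(ioutil.Discard, br); err != nil {
+//			return err
+//		}
+//	}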
+func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + runtime.SetFinalizer(r, func(r *BackupFileReader) { r.Close() }) + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). 
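+//
+// For example, a minimal sketch (the path is hypothetical and error handling
+// is elided) that feeds a file's raw backup stream into a BackupStreamReader:
+//
+//	f, _ := os.Open(`C:\some\file`)
+//	bfr := winio.NewBackupFileReader(f, false)
+//	defer bfr.Close()
+//	br := winio.NewBackupStreamReader(bfr)
+//	// iterate br.Next() as described for BackupStreamReader above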
+func (r *BackupFileReader) Read(b []byte) (int, error) {
+	var bytesRead uint32
+	err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+	if err != nil {
+		return 0, &os.PathError{"BackupRead", r.f.Name(), err}
+	}
+	if bytesRead == 0 {
+		return 0, io.EOF
+	}
+	return int(bytesRead), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileReader. It does not close
+// the underlying file.
+func (r *BackupFileReader) Close() error {
+	if r.ctx != 0 {
+		backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+		r.ctx = 0
+	}
+	return nil
+}
+
+// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
+type BackupFileWriter struct {
+	f               *os.File
+	includeSecurity bool
+	ctx             uintptr
+}
+
+// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
+// Write() will attempt to restore the security descriptor from the stream.
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
+	w := &BackupFileWriter{f, includeSecurity, 0}
+	runtime.SetFinalizer(w, func(w *BackupFileWriter) { w.Close() })
+	return w
+}
+
+// Write restores a portion of the file using the provided backup stream.
+func (w *BackupFileWriter) Write(b []byte) (int, error) {
+	var bytesWritten uint32
+	err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+	if err != nil {
+		return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
+	}
+	if int(bytesWritten) != len(b) {
+		return int(bytesWritten), errors.New("not all bytes could be written")
+	}
+	return len(b), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileWriter. It does not
+// close the underlying file.
+func (w *BackupFileWriter) Close() error {
+	if w.ctx != 0 {
+		backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+		w.ctx = 0
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go
new file mode 100644
index 00000000..fd16f007
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/file.go
@@ -0,0 +1,219 @@
+package winio
+
+import (
+	"errors"
+	"io"
+	"runtime"
+	"sync"
+	"syscall"
+	"time"
+)
+
+//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod
+
+const (
+	cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
+	cFILE_SKIP_SET_EVENT_ON_HANDLE        = 2
+)
+
+var (
+	ErrFileClosed = errors.New("file has already been closed")
+	ErrTimeout    = &timeoutError{}
+)
+
+type timeoutError struct{}
+
+func (e *timeoutError) Error() string   { return "i/o timeout" }
+func (e *timeoutError) Timeout() bool   { return true }
+func (e *timeoutError) Temporary() bool { return true }
+
+var ioInitOnce sync.Once
+var ioCompletionPort syscall.Handle
+
+// ioResult contains the result of an asynchronous IO operation
+type ioResult struct {
+	bytes uint32
+	err   error
+}
+
+// ioOperation represents an outstanding asynchronous Win32 IO
+type ioOperation struct 
{ + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + closing bool + readDeadline time.Time + writeDeadline time.Time +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + runtime.SetFinalizer(f, (*win32File).closeHandle) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return makeWin32File(h) +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + if !f.closing { + // cancel all IO and wait for it to complete + f.closing = true + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + runtime.SetFinalizer(f, nil) + return nil +} + +// prepareIo prepares for a new IO operation +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wg.Add(1) + if f.closing { + return nil, ErrFileClosed + } + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + // Set the timer resolution to 1. This fixes a performance regression in golang 1.6. + timeBeginPeriod(1) + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, deadline time.Time, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + f.wg.Done() + return int(bytes), err + } else { + var r ioResult + wait := true + timedout := false + if f.closing { + cancelIoEx(f.handle, &c.o) + } else if !deadline.IsZero() { + now := time.Now() + if !deadline.After(now) { + timedout = true + } else { + timeout := time.After(deadline.Sub(now)) + select { + case r = <-c.ch: + wait = false + case <-timeout: + timedout = true + } + } + } + if timedout { + cancelIoEx(f.handle, &c.o) + } + if wait { + r = <-c.ch + } + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing { + err = ErrFileClosed + } else if timedout { + err = ErrTimeout + } + } + f.wg.Done() + return int(r.bytes), err + } +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, f.readDeadline, bytes, err) + + // Handle EOF conditions. 
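+	// A successful zero-byte read of a non-empty buffer request means end of
+	// file, and ERROR_BROKEN_PIPE on a pipe read is likewise mapped to io.EOF,
+	// matching the usual io.Reader contract.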
+ if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + return f.asyncIo(c, f.writeDeadline, bytes, err) +} + +func (f *win32File) SetReadDeadline(t time.Time) error { + f.readDeadline = t + return nil +} + +func (f *win32File) SetWriteDeadline(t time.Time) error { + f.writeDeadline = t + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 00000000..dc05a8b3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,30 @@ +package winio + +import ( + "os" + "syscall" + "unsafe" +) + +//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx +//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle + +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + FileAttributes uintptr // includes padding +} + +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{"GetFileInformationByHandleEx", f.Name(), err} + } + return bi, nil +} + +func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := setFileInformationByHandle(syscall.Handle(f.Fd()), 0, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{"SetFileInformationByHandle", f.Name(), err} + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 00000000..82db2830 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,398 @@ +package winio + +import ( + "errors" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW + +type securityAttributes struct { + Length uint32 + SecurityDescriptor *byte + InheritHandle uint32 +} + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + 
cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cPIPE_ACCESS_DUPLEX = 0x3 + cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + + cPIPE_UNLIMITED_INSTANCES = 255 + + cNMPWAIT_USE_DEFAULT_WAIT = 0 + cNMPWAIT_NOWAIT = 1 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + _, err := f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then the timeout +// is the default timeout established by the pipe server. 
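+//
+// For example, a minimal client sketch (the pipe name is hypothetical and a
+// server is assumed to be listening on it via ListenPipe):
+//
+//	timeout := 2 * time.Second
+//	conn, err := winio.DialPipe(`\\.\pipe\mypipe`, &timeout)
+//	if err != nil {
+//		return err
+//	}
+//	defer conn.Close()
+//	fmt.Fprintln(conn, "hello")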
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } + var err error + var h syscall.Handle + for { + h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != cERROR_PIPE_BUSY { + break + } + now := time.Now() + var ms uint32 + if absTimeout.IsZero() { + ms = cNMPWAIT_USE_DEFAULT_WAIT + } else if now.After(absTimeout) { + ms = cNMPWAIT_NOWAIT + } else { + ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) + } + err = waitNamedPipe(path, ms) + if err != nil { + if err == cERROR_SEM_TIMEOUT { + return nil, ErrTimeout + } + break + } + } + if err != nil { + return nil, &os.PathError{Op: "open", Path: path, Err: err} + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + var state uint32 + err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) + if err != nil { + return nil, err + } + + if state&cPIPE_READMODE_MESSAGE != 0 { + return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + securityDescriptor []byte + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED + if first { + flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + } + + var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + if c.MessageMode { + mode |= cPIPE_TYPE_MESSAGE + } + + var sa securityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + if securityDescriptor != nil { + sa.SecurityDescriptor = &securityDescriptor[0] + } + h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, &sa) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + p, err := l.makeServerPipe() + if err == nil { + // Wait for the client to connect. + ch := make(chan error) + go func() { + ch <- connectPipe(p) + }() + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. 
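+					// Closing p fails the outstanding ConnectNamedPipe call, so the
+					// connect goroutine above unblocks and delivers its result on ch.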
+					p.Close()
+					p = nil
+					err = <-ch
+					if err == nil || err == ErrFileClosed {
+						err = ErrPipeListenerClosed
+					}
+					closed = true
+				}
+			}
+			responseCh <- acceptResponse{p, err}
+		}
+	}
+	syscall.Close(l.firstHandle)
+	l.firstHandle = 0
+	// Notify Close() and Accept() callers that the handle has been closed.
+	close(l.doneCh)
+}
+
+// PipeConfig contains configuration for the pipe listener.
+type PipeConfig struct {
+	// SecurityDescriptor contains a Windows security descriptor in SDDL format.
+	SecurityDescriptor string
+
+	// MessageMode determines whether the pipe is in byte or message mode. In either
+	// case the pipe is read in byte mode by default. The only practical difference in
+	// this implementation is that CloseWrite() is only supported for message mode pipes;
+	// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
+	// transferred to the reader (and returned as io.EOF in this implementation)
+	// when the pipe is in message mode.
+	MessageMode bool
+
+	// InputBufferSize specifies the size of the input buffer, in bytes.
+	InputBufferSize int32
+
+	// OutputBufferSize specifies the size of the output buffer, in bytes.
+	OutputBufferSize int32
+}
+
+// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
+// The pipe must not already exist.
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
+	var (
+		sd  []byte
+		err error
+	)
+	if c == nil {
+		c = &PipeConfig{}
+	}
+	if c.SecurityDescriptor != "" {
+		sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
+		if err != nil {
+			return nil, err
+		}
+	}
+	h, err := makeServerPipeHandle(path, sd, c, true)
+	if err != nil {
+		return nil, err
+	}
+	// Immediately open and then close a client handle so that the named pipe is
+	// created but not currently accepting connections.
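+	// Opening and then closing this client handle consumes the first instance's
+	// listening state, so clients that dial before Accept readies a new instance
+	// see ERROR_PIPE_BUSY (which DialPipe retries) rather than connecting early.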
+ h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != nil { + syscall.Close(h) + return nil, err + } + syscall.Close(h2) + l := &win32PipeListener{ + firstHandle: h, + path: path, + securityDescriptor: sd, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, time.Time{}, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 00000000..81f9af7b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,150 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +func RunWithPrivileges(names []string, fn func() error) error { + var 
privileges []uint64
+	for _, name := range names {
+		p := uint64(0)
+		err := lookupPrivilegeValue("", name, &p)
+		if err != nil {
+			return err
+		}
+		privileges = append(privileges, p)
+	}
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+	token, err := newThreadToken()
+	if err != nil {
+		return err
+	}
+	defer releaseThreadToken(token)
+	err = adjustPrivileges(token, privileges)
+	if err != nil {
+		return err
+	}
+	return fn()
+}
+
+func adjustPrivileges(token syscall.Handle, privileges []uint64) error {
+	var b bytes.Buffer
+	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+	for _, p := range privileges {
+		binary.Write(&b, binary.LittleEndian, p)
+		binary.Write(&b, binary.LittleEndian, uint32(SE_PRIVILEGE_ENABLED))
+	}
+	prevState := make([]byte, b.Len())
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED {
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+	var nameBuffer [256]uint16
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid)
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langId uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langId)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (syscall.Handle, error) {
+	err := impersonateSelf(securityImpersonation)
+	if err != nil {
+		panic(err)
+		return 0, err
+	}
+
+	var token syscall.Handle
+	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+	if err != nil {
+		rerr := revertToSelf()
+		if rerr != nil {
+			panic(rerr)
+		}
+		return 0, err
+	}
+	return token, nil
+}
+
+func releaseThreadToken(h syscall.Handle) {
+	err := revertToSelf()
+	if err != nil {
+		panic(err)
+	}
+	syscall.Close(h)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 00000000..96d7b9a8
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,124 @@
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strings"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	reparseTagMountPoint = 0xA0000003
+	reparseTagSymlink    = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+	ReparseTag           uint32
+	ReparseDataLength    uint16
+	Reserved             uint16
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+	Target       string
+	IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
+type UnsupportedReparsePointError struct {
+	Tag uint32
+}
+
+func (e *UnsupportedReparsePointError) Error() string {
+	return fmt.Sprintf("unsupported reparse point %x", e.Tag)
+}
+
+// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
+// or a mount point.
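+//
+// For example, a minimal sketch (buf is assumed to hold a REPARSE_DATA_BUFFER
+// fetched elsewhere, e.g. via DeviceIoControl with FSCTL_GET_REPARSE_POINT):
+//
+//	rp, err := winio.DecodeReparsePoint(buf)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(rp.Target, rp.IsMountPoint)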
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + isMountPoint := false + tag := binary.LittleEndian.Uint32(b[0:4]) + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 16 + binary.LittleEndian.Uint16(b[12:14]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[14:16]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = rp.Target + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. + target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 00000000..60ab56ce --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,96 @@ +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const 
(
+	cERROR_NONE_MAPPED = syscall.Errno(1332)
+)
+
+type AccountLookupError struct {
+	Name string
+	Err  error
+}
+
+func (e *AccountLookupError) Error() string {
+	if e.Name == "" {
+		return "lookup account: empty account name specified"
+	}
+	var s string
+	switch e.Err {
+	case cERROR_NONE_MAPPED:
+		s = "not found"
+	default:
+		s = e.Err.Error()
+	}
+	return "lookup account " + e.Name + ": " + s
+}
+
+type SddlConversionError struct {
+	Sddl string
+	Err  error
+}
+
+func (e *SddlConversionError) Error() string {
+	return "convert " + e.Sddl + ": " + e.Err.Error()
+}
+
+// LookupSidByName looks up the SID of an account by name.
+func LookupSidByName(name string) (sid string, err error) {
+	if name == "" {
+		return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
+	}
+
+	var sidSize, sidNameUse, refDomainSize uint32
+	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
+	if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
+		return "", &AccountLookupError{name, err}
+	}
+	sidBuffer := make([]byte, sidSize)
+	refDomainBuffer := make([]uint16, refDomainSize)
+	err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+	if err != nil {
+		return "", &AccountLookupError{name, err}
+	}
+	var strBuffer *uint16
+	err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
+	if err != nil {
+		return "", &AccountLookupError{name, err}
+	}
+	sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+	localFree(uintptr(unsafe.Pointer(strBuffer)))
+	return sid, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
+	var sdBuffer uintptr
+	err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+	if err != nil {
+		return nil, &SddlConversionError{sddl, err}
+	}
+	defer localFree(sdBuffer)
+	sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
+	copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
+	return sd, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) {
+	var sddl *uint16
+	// The returned string length seems to include an arbitrary number of terminating NULs.
+	// Don't use it.
+ err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 00000000..96fdff7b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall.go b/vendor/github.com/Microsoft/go-winio/zsyscall.go new file mode 100644 index 00000000..74b6e97a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall.go @@ -0,0 +1,492 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package winio + +import "unsafe" +import "syscall" + +var _ unsafe.Pointer + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + modwinmm = syscall.NewLazyDLL("winmm.dll") + modadvapi32 = syscall.NewLazyDLL("advapi32.dll") + + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") +) + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if 
r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func timeBeginPeriod(period uint32) (n int32) { + r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + n = int32(r0) + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if 
e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name string, timeout uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _waitNamedPipe(_p0, timeout) +} + +func _waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), 
uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token syscall.Handle, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *syscall.Handle) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + 
return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/RackSec/srslog/.gitignore b/vendor/github.com/RackSec/srslog/.gitignore new file mode 100644 index 00000000..ebf0f2e4 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/.gitignore @@ -0,0 +1 @@ +.cover diff --git a/vendor/github.com/RackSec/srslog/.travis.yml b/vendor/github.com/RackSec/srslog/.travis.yml new file mode 
100644 index 00000000..4e5c4f07 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/.travis.yml @@ -0,0 +1,18 @@ +sudo: required +dist: trusty +group: edge +language: go +go: +- 1.5 +before_install: + - pip install --user codecov +script: +- | + go get ./... + go test -v -coverprofile=coverage.txt -covermode=atomic + go vet +after_success: + - codecov +notifications: + slack: + secure: dtDue9gP6CRR1jYjEf6raXXFak3QKGcCFvCf5mfvv5XScdpmc3udwgqc5TdyjC0goaC9OK/4jTcCD30dYZm/u6ux3E9mo3xwMl2xRLHx76p5r9rSQtloH19BDwA2+A+bpDfFQVz05k2YXuTiGSvNMMdwzx+Dr294Sl/z43RFB4+b9/R/6LlFpRW89IwftvpLAFnBy4K/ZcspQzKM+rQfQTL5Kk+iZ/KBsuR/VziDq6MoJ8t43i4ee8vwS06vFBKDbUiZ4FIZpLgc2RAL5qso5aWRKYXL6waXfoKHZWKPe0w4+9IY1rDJxG1jEb7YGgcbLaF9xzPRRs2b2yO/c87FKpkh6PDgYHfLjpgXotCoojZrL4p1x6MI1ldJr3NhARGPxS9r4liB9n6Y5nD+ErXi1IMf55fuUHcPY27Jc0ySeLFeM6cIWJ8OhFejCgGw6a5DnnmJo0PqopsaBDHhadpLejT1+K6bL2iGkT4SLcVNuRGLs+VyuNf1+5XpkWZvy32vquO7SZOngLLBv+GIem+t3fWm0Z9s/0i1uRCQei1iUutlYjoV/LBd35H2rhob4B5phIuJin9kb0zbHf6HnaoN0CtN8r0d8G5CZiInVlG5Xcid5Byb4dddf5U2EJTDuCMVyyiM7tcnfjqw9UbVYNxtYM9SzcqIq+uVqM8pYL9xSec= diff --git a/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md b/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..18ac49fc --- /dev/null +++ b/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md @@ -0,0 +1,50 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic + addresses, without explicit permission +* Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to +fairly and consistently applying these principles to every aspect of managing +this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting a project maintainer at [sirsean@gmail.com]. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. Maintainers are +obligated to maintain confidentiality with regard to the reporter of an +incident. 
+ + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.3.0, available at +[http://contributor-covenant.org/version/1/3/0/][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/3/0/ diff --git a/vendor/github.com/RackSec/srslog/LICENSE b/vendor/github.com/RackSec/srslog/LICENSE new file mode 100644 index 00000000..9269338f --- /dev/null +++ b/vendor/github.com/RackSec/srslog/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015 Rackspace. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/RackSec/srslog/README.md b/vendor/github.com/RackSec/srslog/README.md new file mode 100644 index 00000000..1ae1fd4e --- /dev/null +++ b/vendor/github.com/RackSec/srslog/README.md @@ -0,0 +1,131 @@ +[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog) + +# srslog + +Go has a `syslog` package in the standard library, but it has the following +shortcomings: + +1. It doesn't have TLS support +2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716) + +I agree that it doesn't need to be in the standard library. So, I've +followed Brad's suggestion and have made a separate project to handle syslog. + +This code was taken directly from the Go project as a base to start from. + +However, this _does_ have TLS support. + +# Usage + +Basic usage retains the same interface as the original `syslog` package. We +only added to the interface where required to support new functionality. 
+ +Switch from the standard library: + +``` +import( + //"log/syslog" + syslog "github.com/RackSec/srslog" +) +``` + +You can still use it for local syslog: + +``` +w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted UDP: + +``` +w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted TCP: + +``` +w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag") +``` + +But now you can also send messages via TLS-encrypted TCP: + +``` +w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem") +``` + +And if you need more control over your TLS configuration : + +``` +pool := x509.NewCertPool() +serverCert, err := ioutil.ReadFile("/path/to/servercert.pem") +if err != nil { + return nil, err +} +pool.AppendCertsFromPEM(serverCert) +config := tls.Config{ + RootCAs: pool, +} + +w, err := DialWithTLSConfig(network, raddr, priority, tag, &config) +``` + +(Note that in both TLS cases, this uses a self-signed certificate, where the +remote syslog server has the keypair and the client has only the public key.) + +And then to write log messages, continue like so: + +``` +if err != nil { + log.Fatal("failed to connect to syslog:", err) +} +defer w.Close() + +w.Alert("this is an alert") +w.Crit("this is critical") +w.Err("this is an error") +w.Warning("this is a warning") +w.Notice("this is a notice") +w.Info("this is info") +w.Debug("this is debug") +w.Write([]byte("these are some bytes")) +``` + +# Generating TLS Certificates + +We've provided a script that you can use to generate a self-signed keypair: + +``` +pip install cryptography +python script/gen-certs.py +``` + +That outputs the public key and private key to standard out. Put those into +`.pem` files. (And don't put them into any source control. The certificate in +the `test` directory is used by the unit tests, and please do not actually use +it anywhere else.) + +# Running Tests + +Run the tests as usual: + +``` +go test +``` + +But we've also provided a test coverage script that will show you which +lines of code are not covered: + +``` +script/coverage --html +``` + +That will open a new browser tab showing coverage information. + +# License + +This project uses the New BSD License, the same as the Go project itself. + +# Code of Conduct + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. diff --git a/vendor/github.com/RackSec/srslog/constants.go b/vendor/github.com/RackSec/srslog/constants.go new file mode 100644 index 00000000..600801ee --- /dev/null +++ b/vendor/github.com/RackSec/srslog/constants.go @@ -0,0 +1,68 @@ +package srslog + +import ( + "errors" +) + +// Priority is a combination of the syslog facility and +// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity +// message from the FTP facility. The default severity is LOG_EMERG; +// the default facility is LOG_KERN. +type Priority int + +const severityMask = 0x07 +const facilityMask = 0xf8 + +const ( + // Severity. + + // From /usr/include/sys/syslog.h. + // These are the same on Linux, BSD, and OS X. + LOG_EMERG Priority = iota + LOG_ALERT + LOG_CRIT + LOG_ERR + LOG_WARNING + LOG_NOTICE + LOG_INFO + LOG_DEBUG +) + +const ( + // Facility. + + // From /usr/include/sys/syslog.h. + // These are the same up to LOG_FTP on Linux, BSD, and OS X. 
+ LOG_KERN Priority = iota << 3 + LOG_USER + LOG_MAIL + LOG_DAEMON + LOG_AUTH + LOG_SYSLOG + LOG_LPR + LOG_NEWS + LOG_UUCP + LOG_CRON + LOG_AUTHPRIV + LOG_FTP + _ // unused + _ // unused + _ // unused + _ // unused + LOG_LOCAL0 + LOG_LOCAL1 + LOG_LOCAL2 + LOG_LOCAL3 + LOG_LOCAL4 + LOG_LOCAL5 + LOG_LOCAL6 + LOG_LOCAL7 +) + +func validatePriority(p Priority) error { + if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { + return errors.New("log/syslog: invalid priority") + } else { + return nil + } +} diff --git a/vendor/github.com/RackSec/srslog/dialer.go b/vendor/github.com/RackSec/srslog/dialer.go new file mode 100644 index 00000000..47a7b2be --- /dev/null +++ b/vendor/github.com/RackSec/srslog/dialer.go @@ -0,0 +1,87 @@ +package srslog + +import ( + "crypto/tls" + "net" +) + +// dialerFunctionWrapper is a simple object that consists of a dialer function +// and its name. This is primarily for testing, so we can make sure that the +// getDialer method returns the correct dialer function. However, if you ever +// find that you need to check which dialer function you have, this would also +// be useful for you without having to use reflection. +type dialerFunctionWrapper struct { + Name string + Dialer func() (serverConn, string, error) +} + +// Call the wrapped dialer function and return its return values. +func (df dialerFunctionWrapper) Call() (serverConn, string, error) { + return df.Dialer() +} + +// getDialer returns a "dialer" function that can be called to connect to a +// syslog server. +// +// Each dialer function is responsible for dialing the remote host and returns +// a serverConn, the hostname (or a default if the Writer has not specified a +// hostname), and an error in case dialing fails. +// +// The reason for separate dialers is that different network types may need +// to dial their connection differently, yet still provide a net.Conn interface +// that you can use once they have dialed. Rather than an increasingly long +// conditional, we have a map of network -> dialer function (with a sane default +// value), and adding a new network type is as easy as writing the dialer +// function and adding it to the map. +func (w *Writer) getDialer() dialerFunctionWrapper { + dialers := map[string]dialerFunctionWrapper{ + "": dialerFunctionWrapper{"unixDialer", w.unixDialer}, + "tcp+tls": dialerFunctionWrapper{"tlsDialer", w.tlsDialer}, + } + dialer, ok := dialers[w.network] + if !ok { + dialer = dialerFunctionWrapper{"basicDialer", w.basicDialer} + } + return dialer +} + +// unixDialer uses the unixSyslog method to open a connection to the syslog +// daemon running on the local machine. +func (w *Writer) unixDialer() (serverConn, string, error) { + sc, err := unixSyslog() + hostname := w.hostname + if hostname == "" { + hostname = "localhost" + } + return sc, hostname, err +} + +// tlsDialer connects to TLS over TCP, and is used for the "tcp+tls" network +// type. +func (w *Writer) tlsDialer() (serverConn, string, error) { + c, err := tls.Dial("tcp", w.raddr, w.tlsConfig) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = &netConn{conn: c} + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} + +// basicDialer is the most common dialer for syslog, and supports both TCP and +// UDP connections. 
+func (w *Writer) basicDialer() (serverConn, string, error) { + c, err := net.Dial(w.network, w.raddr) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = &netConn{conn: c} + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} diff --git a/vendor/github.com/RackSec/srslog/formatter.go b/vendor/github.com/RackSec/srslog/formatter.go new file mode 100644 index 00000000..2a746251 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/formatter.go @@ -0,0 +1,48 @@ +package srslog + +import ( + "fmt" + "os" + "time" +) + +// Formatter is a type of function that takes the constituent parts of a +// syslog message and returns a formatted string. A different Formatter is +// defined for each different syslog protocol we support. +type Formatter func(p Priority, hostname, tag, content string) string + +// DefaultFormatter is the original format supported by the Go syslog package, +// and is a non-compliant amalgamation of 3164 and 5424 that is intended to +// maximize compatibility. +func DefaultFormatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s", + p, timestamp, hostname, tag, os.Getpid(), content) + return msg +} + +// UnixFormatter omits the hostname, because it is only used locally. +func UnixFormatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.Stamp) + msg := fmt.Sprintf("<%d>%s %s[%d]: %s", + p, timestamp, tag, os.Getpid(), content) + return msg +} + +// RFC3164Formatter provides an RFC 3164 compliant message. +func RFC3164Formatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.Stamp) + msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s", + p, timestamp, hostname, tag, os.Getpid(), content) + return msg +} + +// RFC5424Formatter provides an RFC 5424 compliant message. +func RFC5424Formatter(p Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + appName := os.Args[0] + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, appName, pid, tag, content) + return msg +} diff --git a/vendor/github.com/RackSec/srslog/framer.go b/vendor/github.com/RackSec/srslog/framer.go new file mode 100644 index 00000000..ab46f0de --- /dev/null +++ b/vendor/github.com/RackSec/srslog/framer.go @@ -0,0 +1,24 @@ +package srslog + +import ( + "fmt" +) + +// Framer is a type of function that takes an input string (typically an +// already-formatted syslog message) and applies "message framing" to it. We +// have different framers because different versions of the syslog protocol +// and its transport requirements define different framing behavior. +type Framer func(in string) string + +// DefaultFramer does nothing, since there is no framing to apply. This is +// the original behavior of the Go syslog package, and is also typically used +// for UDP syslog. +func DefaultFramer(in string) string { + return in +} + +// RFC5425MessageLengthFramer prepends the message length to the front of the +// provided message, as defined in RFC 5425.
+func RFC5425MessageLengthFramer(in string) string { + return fmt.Sprintf("%d %s", len(in), in) +} diff --git a/vendor/github.com/RackSec/srslog/net_conn.go b/vendor/github.com/RackSec/srslog/net_conn.go new file mode 100644 index 00000000..75e4c3ca --- /dev/null +++ b/vendor/github.com/RackSec/srslog/net_conn.go @@ -0,0 +1,30 @@ +package srslog + +import ( + "net" +) + +// netConn has an internal net.Conn and adheres to the serverConn interface, +// allowing us to send syslog messages over the network. +type netConn struct { + conn net.Conn +} + +// writeString formats syslog messages using time.RFC3339 and includes the +// hostname, and sends the message to the connection. +func (n *netConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = DefaultFormatter + } + formattedMessage := framer(formatter(p, hostname, tag, msg)) + _, err := n.conn.Write([]byte(formattedMessage)) + return err +} + +// close the network connection +func (n *netConn) close() error { + return n.conn.Close() +} diff --git a/vendor/github.com/RackSec/srslog/srslog.go b/vendor/github.com/RackSec/srslog/srslog.go new file mode 100644 index 00000000..4469d720 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/srslog.go @@ -0,0 +1,100 @@ +package srslog + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "log" + "os" +) + +// This interface allows us to work with both local and network connections, +// and enables Solaris support (see syslog_unix.go). +type serverConn interface { + writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, s string) error + close() error +} + +// New establishes a new connection to the system log daemon. Each +// write to the returned Writer sends a log message with the given +// priority and prefix. +func New(priority Priority, tag string) (w *Writer, err error) { + return Dial("", "", priority, tag) +} + +// Dial establishes a connection to a log daemon by connecting to +// address raddr on the specified network. Each write to the returned +// Writer sends a log message with the given facility, severity and +// tag. +// If network is empty, Dial will connect to the local syslog server. +func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) { + return DialWithTLSConfig(network, raddr, priority, tag, nil) +} + +// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses certPath to load TLS certificates and configure +// the secure connection. +func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) { + serverCert, err := ioutil.ReadFile(certPath) + if err != nil { + return nil, err + } + + return DialWithTLSCert(network, raddr, priority, tag, serverCert) +} + +// DialWithTLSCert establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses serverCert to load a TLS certificate +// and configure the secure connection.
+func DialWithTLSCert(network, raddr string, priority Priority, tag string, serverCert []byte) (*Writer, error) { + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(serverCert) + config := tls.Config{ + RootCAs: pool, + } + + return DialWithTLSConfig(network, raddr, priority, tag, &config) +} + +// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to +// address raddr on the specified network. It uses tlsConfig to configure the secure connection. +func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) { + if err := validatePriority(priority); err != nil { + return nil, err + } + + if tag == "" { + tag = os.Args[0] + } + hostname, _ := os.Hostname() + + w := &Writer{ + priority: priority, + tag: tag, + hostname: hostname, + network: network, + raddr: raddr, + tlsConfig: tlsConfig, + } + + w.Lock() + defer w.Unlock() + + err := w.connect() + if err != nil { + return nil, err + } + return w, err +} + +// NewLogger creates a log.Logger whose output is written to +// the system log service with the specified priority. The logFlag +// argument is the flag set passed through to log.New to create +// the Logger. +func NewLogger(p Priority, logFlag int) (*log.Logger, error) { + s, err := New(p, "") + if err != nil { + return nil, err + } + return log.New(s, "", logFlag), nil +} diff --git a/vendor/github.com/RackSec/srslog/srslog_unix.go b/vendor/github.com/RackSec/srslog/srslog_unix.go new file mode 100644 index 00000000..a04d9396 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/srslog_unix.go @@ -0,0 +1,54 @@ +package srslog + +import ( + "errors" + "io" + "net" +) + +// unixSyslog opens a connection to the syslog daemon running on the +// local machine using a Unix domain socket. This function exists because of +// Solaris support as implemented by gccgo. On Solaris you can not +// simply open a TCP connection to the syslog daemon. The gccgo +// sources have a syslog_solaris.go file that implements unixSyslog to +// return a type that satisfies the serverConn interface and simply calls the C +// library syslog function. +func unixSyslog() (conn serverConn, err error) { + logTypes := []string{"unixgram", "unix"} + logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} + for _, network := range logTypes { + for _, path := range logPaths { + conn, err := net.Dial(network, path) + if err != nil { + continue + } else { + return &localConn{conn: conn}, nil + } + } + } + return nil, errors.New("Unix syslog delivery error") +} + +// localConn adheres to the serverConn interface, allowing us to send syslog +// messages to the local syslog daemon over a Unix domain socket. +type localConn struct { + conn io.WriteCloser +} + +// writeString formats syslog messages using time.Stamp instead of time.RFC3339, +// and omits the hostname (because it is expected to be used locally). 
+func (n *localConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { + if framer == nil { + framer = DefaultFramer + } + if formatter == nil { + formatter = UnixFormatter + } + _, err := n.conn.Write([]byte(framer(formatter(p, hostname, tag, msg)))) + return err +} + +// close the (local) network connection +func (n *localConn) close() error { + return n.conn.Close() +} diff --git a/vendor/github.com/RackSec/srslog/writer.go b/vendor/github.com/RackSec/srslog/writer.go new file mode 100644 index 00000000..fdecaf61 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/writer.go @@ -0,0 +1,164 @@ +package srslog + +import ( + "crypto/tls" + "strings" + "sync" +) + +// A Writer is a connection to a syslog server. +type Writer struct { + sync.Mutex // guards conn + + priority Priority + tag string + hostname string + network string + raddr string + tlsConfig *tls.Config + framer Framer + formatter Formatter + + conn serverConn +} + +// connect makes a connection to the syslog server. +// It must be called with w.mu held. +func (w *Writer) connect() (err error) { + if w.conn != nil { + // ignore err from close, it makes sense to continue anyway + w.conn.close() + w.conn = nil + } + + var conn serverConn + var hostname string + dialer := w.getDialer() + conn, hostname, err = dialer.Call() + if err == nil { + w.conn = conn + w.hostname = hostname + } + + return +} + +// SetFormatter changes the formatter function for subsequent messages. +func (w *Writer) SetFormatter(f Formatter) { + w.formatter = f +} + +// SetFramer changes the framer function for subsequent messages. +func (w *Writer) SetFramer(f Framer) { + w.framer = f +} + +// Write sends a log message to the syslog daemon using the default priority +// passed into `srslog.New` or the `srslog.Dial*` functions. +func (w *Writer) Write(b []byte) (int, error) { + return w.writeAndRetry(w.priority, string(b)) +} + +// Close closes a connection to the syslog daemon. +func (w *Writer) Close() error { + w.Lock() + defer w.Unlock() + + if w.conn != nil { + err := w.conn.close() + w.conn = nil + return err + } + return nil +} + +// Emerg logs a message with severity LOG_EMERG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Emerg(m string) (err error) { + _, err = w.writeAndRetry(LOG_EMERG, m) + return err +} + +// Alert logs a message with severity LOG_ALERT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Alert(m string) (err error) { + _, err = w.writeAndRetry(LOG_ALERT, m) + return err +} + +// Crit logs a message with severity LOG_CRIT; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Crit(m string) (err error) { + _, err = w.writeAndRetry(LOG_CRIT, m) + return err +} + +// Err logs a message with severity LOG_ERR; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Err(m string) (err error) { + _, err = w.writeAndRetry(LOG_ERR, m) + return err +} + +// Warning logs a message with severity LOG_WARNING; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. 
+func (w *Writer) Warning(m string) (err error) { + _, err = w.writeAndRetry(LOG_WARNING, m) + return err +} + +// Notice logs a message with severity LOG_NOTICE; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Notice(m string) (err error) { + _, err = w.writeAndRetry(LOG_NOTICE, m) + return err +} + +// Info logs a message with severity LOG_INFO; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Info(m string) (err error) { + _, err = w.writeAndRetry(LOG_INFO, m) + return err +} + +// Debug logs a message with severity LOG_DEBUG; this overrides the default +// priority passed to `srslog.New` and the `srslog.Dial*` functions. +func (w *Writer) Debug(m string) (err error) { + _, err = w.writeAndRetry(LOG_DEBUG, m) + return err +} + +func (w *Writer) writeAndRetry(p Priority, s string) (int, error) { + pr := (w.priority & facilityMask) | (p & severityMask) + + w.Lock() + defer w.Unlock() + + if w.conn != nil { + if n, err := w.write(pr, s); err == nil { + return n, err + } + } + if err := w.connect(); err != nil { + return 0, err + } + return w.write(pr, s) +} + +// write generates and writes a syslog formatted string. It formats the +// message based on the current Formatter and Framer. +func (w *Writer) write(p Priority, msg string) (int, error) { + // ensure it ends in a \n + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + + err := w.conn.writeString(w.framer, w.formatter, p, w.hostname, w.tag, msg) + if err != nil { + return 0, err + } + // Note: return the length of the input, not the number of + // bytes printed by Fprintf, because this must behave like + // an io.Writer. + return len(msg), nil +} diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/boltdb/bolt/.gitignore new file mode 100644 index 00000000..c7bd2b7a --- /dev/null +++ b/vendor/github.com/boltdb/bolt/.gitignore @@ -0,0 +1,4 @@ +*.prof +*.test +*.swp +/bin/ diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE new file mode 100644 index 00000000..004e77fe --- /dev/null +++ b/vendor/github.com/boltdb/bolt/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile new file mode 100644 index 00000000..e035e63a --- /dev/null +++ b/vendor/github.com/boltdb/bolt/Makefile @@ -0,0 +1,18 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt + +test: + @go test -v -cover . + @go test -v ./cmd/bolt + +.PHONY: fmt test diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md new file mode 100644 index 00000000..66b19ace --- /dev/null +++ b/vendor/github.com/boltdb/bolt/README.md @@ -0,0 +1,844 @@ +Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg) +==== + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable and the API is fixed. Full unit test coverage and randomized +black box testing are used to ensure database consistency and thread safety. +Bolt is currently in high-load production environments serving databases as +large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed +services every day. 
+ +## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/boltdb/bolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + "github.com/boltdb/bolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... +} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating a transaction from the `DB` is thread safe. + +Read-only transactions and read-write transactions should not depend on one +another and generally shouldn't be opened simultaneously in the same goroutine. +This can cause a deadlock as the read-write transaction needs to periodically +re-map the data file but it cannot do so while a read-only transaction is open.
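To make that deadlock concrete, here is a minimal sketch of the anti-pattern to avoid (illustrative only; it assumes a bucket named `MyBucket` already exists):

```go
// Anti-pattern: a read-write transaction opened while a read-only
// transaction is still open in the same goroutine. If the update has to
// grow and re-mmap the data file, it must wait for every open read
// transaction to finish, including the enclosing View(), and deadlocks.
db.View(func(tx *bolt.Tx) error {
	return db.Update(func(tx2 *bolt.Tx) error { // may block forever
		return tx2.Bucket([]byte("MyBucket")).Put([]byte("k"), []byte("v"))
	})
})
```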
+ + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also roll back the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... +} +fmt.Printf("Allocated ID %d\n", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err = tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique.
You can create a bucket using the `Tx.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. + + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different from the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast.
To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. 
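As a minimal sketch of that approach (the `backup.db` file name and error handling here are illustrative, not from the Bolt docs), a hot backup to a local file looks like this:

```go
// Copy a consistent snapshot of the open database to backup.db while
// other readers and writers keep running.
err := db.View(func(tx *bolt.Tx) error {
	f, err := os.Create("backup.db")
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = tx.WriteTo(f)
	return err
})
if err != nil {
	log.Fatal(err)
}
```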
+ +One common use case is to back up over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can back up using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. + +If you want to back up to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. + +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample. + + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. To do this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with an initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud.
These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). +* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. + +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/value pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions.
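As a rough illustration of that last point (the bucket and key names here are invented for the example), a compare-and-swap needs nothing more than an ordinary update transaction in Bolt:

```go
// Set "owner" only if it still holds the value we read earlier; the
// serializable transaction makes the read-check-write sequence safe.
// Assumes the "config" bucket already exists. Needs "bytes" and "errors".
err := db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("config"))
	if !bytes.Equal(b.Get([]byte("owner")), []byte("old-owner")) {
		return errors.New("value changed, aborting swap")
	}
	return b.Put([]byte("owner"), []byte("new-owner"))
})
```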
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB focuses heavily on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions, such as direct writes, for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read-intensive workloads. Sequential write performance is
+  also fast but random writes can be slow. You can use `DB.Batch()` or add a
+  write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+  SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long-running read transactions. Bolt uses copy-on-write so
+  old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+  transaction has been committed or rolled back then the memory they point to
+  can be reused by a new page or can be unmapped from virtual memory, and you'll
+  see an `unexpected fault address` panic when accessing it.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+  buckets that have random inserts will cause your database to have very poor
+  page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+  once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+  page will not split until the transaction is committed. Randomly inserting
+  more than 100,000 key/value pairs into a single new bucket in a single
+  transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+  caching of the data. Typically, the OS will cache as much of the file as it
+  can in memory and will release memory to other processes as needed. This means
+  that Bolt can show very high memory usage when working with large databases;
+  this is expected, and the OS will reclaim that memory when required. Bolt can
+  handle databases much larger than the available physical RAM, provided its
+  memory-map fits in the process virtual address space. This may be a problem
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+  will be endian-specific. This means that you cannot copy a Bolt file from a
+  little-endian machine to a big-endian machine and have it work. For most
+  users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+  and return free pages back to the disk. 
Instead, Bolt maintains a free list
+  of unused pages within its data file. These free pages can be reused by later
+  transactions. This works well for many use cases as databases generally tend
+  to grow. However, it's important to note that deleting large chunks of data
+  will not allow you to reclaim that space on disk.
+
+  For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+  creating the database if it doesn't exist, obtaining an exclusive lock on the
+  file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+  value of the `writable` argument. This requires briefly obtaining the "meta"
+  lock to keep track of open transactions. Only one read-write transaction can
+  exist at a time so the "rwlock" is acquired during the life of a read-write
+  transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+  arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+  materializes the underlying page and the page's parent pages into memory as
+  "nodes". These nodes are where mutations occur during read-write transactions.
+  These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+  to move to the page & position of a key/value pair. During a read-only
+  transaction, the key and value data is returned as a direct reference to the
+  underlying mmap file so there's no allocation overhead. For read-write
+  transactions, this data may reference the mmap file or one of the in-memory
+  node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+  or in-memory nodes. It can seek to a specific key, move to the first or last
+  value, or it can move forward or backward. The cursor handles the movement up
+  and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+  into pages to be written to disk. Writing to disk then occurs in two phases.
+  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+  new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two-phase write ensures that partially written data
+  pages are ignored in the event of a crash since the meta page pointing to them
+  is never written. Partially written meta pages are invalidated because they
+  are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open-source projects that use Bolt:
+
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. 
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. 
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. + +If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml new file mode 100644 index 00000000..6e26e941 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/appveyor.yml @@ -0,0 +1,18 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\boltdb\bolt + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - go version + - go env + - go get -v -t ./... + +build_script: + - go test -v ./... diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go new file mode 100644 index 00000000..e659bfb9 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_386.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go new file mode 100644 index 00000000..cca6b7eb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 00000000..e659bfb9 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,7 @@ +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 00000000..6d230935 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 00000000..2b676661 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. 
+func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 00000000..7058c3d7 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 00000000..645ddc3e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go new file mode 100644 index 00000000..2dc6be02 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go @@ -0,0 +1,9 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 00000000..8351e129 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 00000000..f4dd26bb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go new file mode 100644 index 00000000..cad62dda --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix.go @@ -0,0 +1,89 @@ +// +build !windows,!plan9,!solaris + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. 
+ if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 00000000..307bf2b3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. 
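+	// The mapping is created read-only (PROT_READ); Bolt performs writes
+	// through the file handle rather than through the mmap.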
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 00000000..d538e6af --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. 
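+// On Windows the advisory lock is held on the separate ".lock" file created
+// by flock above, so releasing it also closes and removes that file.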
+func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path+lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 00000000..f5044252 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go new file mode 100644 index 00000000..d2f8c524 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,748 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. 
+// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } else { + return nil, ErrIncompatibleValue + } + } + + // Create empty, inline bucket. 
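+	// A zero root page id marks the bucket as inline: its contents are
+	// serialized into the parent's value by write() below.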
+ var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if v == nil { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. +func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. 
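+	// Overwriting a plain key is allowed; silently replacing a nested
+	// bucket is not.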
+ if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + _, _, flags := c.seek(key) + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * int(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. + lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. 
+ subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. 
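+	// The root node is only materialized once the bucket has been written
+	// to, so a nil rootNode means there are no changes to spill here.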
+ if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. 
+ if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go new file mode 100644 index 00000000..1be9f35e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cursor.go @@ -0,0 +1,400 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. 
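+// If the first key is a nested bucket then its value is returned as nil.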
+func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. 
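+// The removal is applied to the in-memory node and is persisted when the
+// transaction commits.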
+func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. 
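+// Every page/node visited on the way down is pushed onto the cursor's stack.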
+func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. 
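+	// Descending via childAt() materializes the on-disk pages along the
+	// path into in-memory nodes so they can be mutated.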
+ var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go new file mode 100644 index 00000000..501d36aa --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db.go @@ -0,0 +1,993 @@ +package bolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. 
Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. 
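+	// (Usage sketch, illustrative only and not part of the vendored source:
+	// opening a database with a lock timeout. The path is a placeholder.
+	//
+	//	db, err := bolt.Open("/var/lib/app/data.db", 0600, &bolt.Options{
+	//		Timeout: 1 * time.Second, // fail fast instead of blocking on the file lock
+	//	})
+	//	if err != nil {
+	//		log.Fatal(err)
+	//	}
+	//	defer db.Close()
+	// )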
+	if info, err := db.file.Stat(); err != nil {
+		return nil, err
+	} else if info.Size() == 0 {
+		// Initialize new files with meta pages.
+		if err := db.init(); err != nil {
+			return nil, err
+		}
+	} else {
+		// Read the first meta page to determine the page size.
+		var buf [0x1000]byte
+		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
+			m := db.pageInBuffer(buf[:], 0).meta()
+			if err := m.validate(); err != nil {
+				return nil, err
+			}
+			db.pageSize = int(m.pageSize)
+		}
+	}
+
+	// Memory map the data file.
+	if err := db.mmap(options.InitialMmapSize); err != nil {
+		_ = db.close()
+		return nil, err
+	}
+
+	// Read in the freelist.
+	db.freelist = newFreelist()
+	db.freelist.read(db.page(db.meta().freelist))
+
+	// Mark the database as opened and return.
+	return db, nil
+}
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+	db.mmaplock.Lock()
+	defer db.mmaplock.Unlock()
+
+	info, err := db.file.Stat()
+	if err != nil {
+		return fmt.Errorf("mmap stat error: %s", err)
+	} else if int(info.Size()) < db.pageSize*2 {
+		return fmt.Errorf("file size too small")
+	}
+
+	// Ensure the size is at least the minimum size.
+	var size = int(info.Size())
+	if size < minsz {
+		size = minsz
+	}
+	size, err = db.mmapSize(size)
+	if err != nil {
+		return err
+	}
+
+	// Dereference all mmap references before unmapping.
+	if db.rwtx != nil {
+		db.rwtx.root.dereference()
+	}
+
+	// Unmap existing data before continuing.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Memory-map the data file as a byte slice.
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages.
+	if err := db.meta0.validate(); err != nil {
+		return err
+	}
+	if err := db.meta1.validate(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Set the page size to the OS page size.
+	db.pageSize = os.Getpagesize()
+
+	// Create two meta pages on a buffer.
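+	// (Resulting file layout, an illustrative note derived from the
+	// assignments below: a freshly initialized database is exactly four
+	// pages,
+	//
+	//	page 0: meta copy A (txid 0)    page 1: meta copy B (txid 1)
+	//	page 2: empty freelist          page 3: empty leaf (root bucket)
+	//
+	// hence m.freelist = 2, m.root.root = 3, and m.pgid = 4 marks the high
+	// water mark, one past the last page in use.)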
+ buf := make([]byte, db.pageSize*4) + for i := 0; i < 2; i++ { + p := db.pageInBuffer(buf[:], pgid(i)) + p.id = pgid(i) + p.flags = metaPageFlag + + // Initialize the meta page. + m := p.meta() + m.magic = magic + m.version = version + m.pageSize = uint32(db.pageSize) + m.freelist = 2 + m.root = bucket{root: 3} + m.pgid = 4 + m.txid = txid(i) + } + + // Write an empty freelist at page 3. + p := db.pageInBuffer(buf[:], pgid(2)) + p.id = pgid(2) + p.flags = freelistPageFlag + p.count = 0 + + // Write an empty leaf page at page 4. + p = db.pageInBuffer(buf[:], pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + + return nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.RLock() + defer db.mmaplock.RUnlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + db.path = "" + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. 
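+	// (Usage sketch for Begin above, illustrative only and not part of the
+	// vendored source: a manually managed read-only transaction, which must
+	// be closed so old pages can be reclaimed. Bucket and key names are
+	// placeholders; Get is the Bucket read method from bucket.go.
+	//
+	//	tx, err := db.Begin(false)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	defer tx.Rollback()
+	//	v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+	// )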
+ t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + db.txs = append(db.txs[:i], db.txs[i+1:]...) + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. 
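+	// (Usage sketch for Update above, illustrative only and not part of the
+	// vendored source. CreateBucketIfNotExists is declared in tx.go later in
+	// this diff; Put is the Bucket write method from bucket.go. "widgets" is
+	// a placeholder name.
+	//
+	//	err := db.Update(func(tx *bolt.Tx) error {
+	//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+	//		if err != nil {
+	//			return err
+	//		}
+	//		return b.Put([]byte("answer"), []byte("42"))
+	//	})
+	// )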
+ err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. 
+// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + if db.meta0.txid > db.meta1.txid { + return db.meta0 + } + return db.meta1 +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + buf := make([]byte, count*db.pageSize) + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. + if p.id = db.freelist.allocate(count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. 
+ MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. + InitialMmapSize int +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. +func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = other.TxN - s.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } else if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid { + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) 
} +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go new file mode 100644 index 00000000..cc937845 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. + +Transactions provide either read-only or read-write access to the database. +Read-only transactions can retrieve key/value pairs and can use Cursors to +iterate over the dataset sequentially. Read-write transactions can create and +delete buckets and can insert and remove keys. Only one read-write transaction +is allowed at a time. + + +Caveats + +The database uses a read-only, memory-mapped data file to ensure that +applications cannot corrupt the database, however, this means that keys and +values returned from Bolt cannot be changed. Writing to a read-only byte slice +will cause Go to panic. + +Keys and values retrieved from the database are only valid for the life of +the transaction. When used outside the transaction, these byte slices can +point to different data or can point to invalid memory which will cause a panic. + + +*/ +package bolt diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go new file mode 100644 index 00000000..6883786d --- /dev/null +++ b/vendor/github.com/boltdb/bolt/errors.go @@ -0,0 +1,70 @@ +package bolt + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when a data file is not a Bolt-formatted database. + ErrInvalid = errors.New("invalid database") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). 
+ ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go new file mode 100644 index 00000000..0161948f --- /dev/null +++ b/vendor/github.com/boltdb/bolt/freelist.go @@ -0,0 +1,242 @@ +package bolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. +type freelist struct { + ids []pgid // all free and available free page ids. + pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist() *freelist { + return &freelist{ + pending: make(map[txid][]pgid), + cache: make(map[pgid]bool), + } +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// free_count returns count of free pages +func (f *freelist) free_count() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, list := range f.pending { + count += len(list) + } + return count +} + +// all returns a list of all free ids and all pending ids in one sorted list. +func (f *freelist) all() []pgid { + m := make(pgids, 0) + + for _, list := range f.pending { + m = append(m, list...) 
+ } + + sort.Sort(m) + return pgids(f.ids).merge(m) +} + +// allocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) allocate(n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + var ids = f.pending[txid] + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + + // Add to the freelist and cache. + ids = append(ids, id) + f.cache[id] = true + } + f.pending[txid] = ids +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, ids := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + for _, id := range f.pending[txid] { + delete(f.cache, id) + } + + // Remove pages from pending list. + delete(f.pending, txid) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + + // Rebuild the page cache. + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. 
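+	// (Worked example for allocate above, illustrative: with f.ids =
+	// [3 4 5 6 9], allocate(3) finds the contiguous run 3,4,5, takes the
+	// fast path since i+1 == n, leaves f.ids = [6 9], and returns pgid 3.
+	// A subsequent allocate(2) finds no contiguous pair in [6 9] and
+	// returns 0, which forces the caller to grow the file instead.)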
+ ids := f.all() + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + if len(ids) < 0xFFFF { + p.count = uint16(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go new file mode 100644 index 00000000..e9d64af8 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node.go @@ -0,0 +1,599 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. 
+func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. 
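+	// (Worked example for put above, illustrative: on a leaf with keys
+	// [b d f], put with oldKey=d overwrites index 1 in place because the
+	// match is exact, while oldKey=c shifts d and f right and inserts at
+	// index 1, so the inodes stay sorted without a separate sort pass.)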
+ if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. +// This is only be called from split(). 
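+// A worked example (illustrative): with a 4096-byte page and the default
+// bucket FillPercent of 0.5, the threshold computed in splitTwo above is
+// 2048 bytes, so the first node keeps roughly half a page of inodes and
+// the remainder move to the second node.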
+func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. 
+ child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. 
+ for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go new file mode 100644 index 00000000..818aa1b1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page.go @@ -0,0 +1,172 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. 
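+//
+// Layout note (illustrative, not part of the vendored source): a page is
+// the fixed page header, then count element headers, then the packed
+// key/value bytes, e.g. for a leaf page:
+//
+//	| page header | elem 0 | elem 1 | ... | k0 v0 k1 v1 ... |
+//
+// Each element's pos is the offset from that element's own header to its
+// key, which is why key() and value() below resolve data with nothing but
+// pointer arithmetic on the element address.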
+type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } else if len(b) == 0 { + return a + } + + // Create a list to hold all elements from both lists. + merged := make(pgids, 0, len(a)+len(b)) + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + merged = append(merged, follow...) + + return merged +} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go new file mode 100644 index 00000000..b8510fdb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -0,0 +1,666 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. 
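+	//
+	// A short sketch of the intended use (w is assumed to be an io.Writer
+	// supplied by the caller):
+	//
+	//	tx.WriteFlag = syscall.O_DIRECT
+	//	_, err := tx.WriteTo(w)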
+ WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. 
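+//
+// A minimal sketch of the manual pattern (db is assumed to be an open *DB;
+// DB.Update wraps these same steps):
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+//		return err
+//	}
+//	return tx.Commit()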
+func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. 
+ tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + for _, id := range tx.db.freelist.all() { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. 
+ for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Clear out page cache. + tx.pages = make(map[pgid]*page) + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. 
+	tx.stats.Write++
+
+	return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+	// Check the dirty pages first.
+	if tx.pages != nil {
+		if p, ok := tx.pages[id]; ok {
+			return p
+		}
+	}
+
+	// Otherwise return directly from the mmap.
+	return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+	p := tx.page(pgid)
+
+	// Execute function.
+	fn(p, depth)
+
+	// Recursively loop over children.
+	if (p.flags & branchPageFlag) != 0 {
+		for i := 0; i < int(p.count); i++ {
+			elem := p.branchPageElement(uint16(i))
+			tx.forEachPage(elem.pgid, depth+1, fn)
+		}
+	}
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	} else if pgid(id) >= tx.meta.pgid {
+		return nil, nil
+	}
+
+	// Build the page info.
+	p := tx.db.page(pgid(id))
+	info := &PageInfo{
+		ID:            id,
+		Count:         int(p.count),
+		OverflowCount: int(p.overflow),
+	}
+
+	// Determine the type (or if it's free).
+	if tx.db.freelist.freed(pgid(id)) {
+		info.Type = "free"
+	} else {
+		info.Type = p.typ()
+	}
+
+	return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+	// Page statistics.
+	PageCount int // number of page allocations
+	PageAlloc int // total bytes allocated
+
+	// Cursor statistics.
+	CursorCount int // number of cursors created
+
+	// Node statistics
+	NodeCount int // number of node allocations
+	NodeDeref int // number of node dereferences
+
+	// Rebalance statistics.
+	Rebalance     int           // number of node rebalances
+	RebalanceTime time.Duration // total time spent rebalancing
+
+	// Split/Spill statistics.
+	Split     int           // number of nodes split
+	Spill     int           // number of nodes spilled
+	SpillTime time.Duration // total time spent spilling
+
+	// Write statistics.
+	Write     int           // number of writes performed
+	WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+	s.PageCount += other.PageCount
+	s.PageAlloc += other.PageAlloc
+	s.CursorCount += other.CursorCount
+	s.NodeCount += other.NodeCount
+	s.NodeDeref += other.NodeDeref
+	s.Rebalance += other.Rebalance
+	s.RebalanceTime += other.RebalanceTime
+	s.Split += other.Split
+	s.Spill += other.Spill
+	s.SpillTime += other.SpillTime
+	s.Write += other.Write
+	s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
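+//
+// Sketch of the intended use (db is assumed to be an open *DB; DB.Stats
+// returns a snapshot whose TxStats field can be diffed):
+//
+//	prev := db.Stats().TxStats
+//	// ... run some transactions ...
+//	cur := db.Stats().TxStats
+//	diff := cur.Sub(&prev)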
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/github.com/cloudfoundry/gosigar/.gitignore b/vendor/github.com/cloudfoundry/gosigar/.gitignore new file mode 100644 index 00000000..8000dd9d --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/vendor/github.com/cloudfoundry/gosigar/.travis.yml b/vendor/github.com/cloudfoundry/gosigar/.travis.yml new file mode 100644 index 00000000..2a9c5d0c --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.2 + +install: + - 'go install github.com/onsi/ginkgo/ginkgo' +script: 'ginkgo -r' diff --git a/vendor/github.com/cloudfoundry/gosigar/LICENSE b/vendor/github.com/cloudfoundry/gosigar/LICENSE new file mode 100644 index 00000000..11069edd --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/cloudfoundry/gosigar/NOTICE b/vendor/github.com/cloudfoundry/gosigar/NOTICE new file mode 100644 index 00000000..fda553b5 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/NOTICE @@ -0,0 +1,9 @@ +Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. + +This product is licensed to you under the Apache License, Version 2.0 (the "License"). +You may not use this product except in compliance with the License. + +This product includes a number of subcomponents with +separate copyright notices and license terms. Your use of these +subcomponents is subject to the terms and conditions of the +subcomponent's license, as noted in the LICENSE file. 
\ No newline at end of file diff --git a/vendor/github.com/cloudfoundry/gosigar/README.md b/vendor/github.com/cloudfoundry/gosigar/README.md new file mode 100644 index 00000000..90d51f9b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/README.md @@ -0,0 +1,22 @@ +# Go sigar + +## Overview + +Go sigar is a golang implementation of the +[sigar API](https://github.com/hyperic/sigar). The Go version of +sigar has a very similar interface, but is being written from scratch +in pure go/cgo, rather than cgo bindings for libsigar. + +## Test drive + + $ go get github.com/cloudfoundry/gosigar + $ cd $GOPATH/src/github.com/cloudfoundry/gosigar/examples + $ go run uptime.go + +## Supported platforms + +Currently targeting modern flavors of darwin and linux. + +## License + +Apache 2.0 diff --git a/vendor/github.com/cloudfoundry/gosigar/Vagrantfile b/vendor/github.com/cloudfoundry/gosigar/Vagrantfile new file mode 100644 index 00000000..6fd990c1 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/Vagrantfile @@ -0,0 +1,25 @@ +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "hashicorp/precise64" + config.vm.provision "shell", inline: "mkdir -p /home/vagrant/go" + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/cloudfoundry/gosigar" + config.vm.provision "shell", inline: "chown -R vagrant:vagrant /home/vagrant/go" + install_go = <<-BASH + set -e + +if [ ! -d "/usr/local/go" ]; then + cd /tmp && wget https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz + cd /usr/local + tar xvzf /tmp/go1.3.3.linux-amd64.tar.gz + echo 'export GOPATH=/home/vagrant/go; export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin' >> /home/vagrant/.bashrc +fi +export GOPATH=/home/vagrant/go +export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin +/usr/local/go/bin/go get -u github.com/onsi/ginkgo/ginkgo +/usr/local/go/bin/go get -u github.com/onsi/gomega; +BASH + config.vm.provision "shell", inline: 'apt-get install -y git-core' + config.vm.provision "shell", inline: install_go +end diff --git a/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go b/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go new file mode 100644 index 00000000..0e80aa4b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go @@ -0,0 +1,69 @@ +package sigar + +import ( + "time" +) + +type ConcreteSigar struct{} + +func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) { + // samplesCh is buffered to 1 value to immediately return first CPU sample + samplesCh := make(chan Cpu, 1) + + stopCh := make(chan struct{}) + + go func() { + var cpuUsage Cpu + + // Immediately provide non-delta value. + // samplesCh is buffered to 1 value, so it will not block. 
+ cpuUsage.Get() + samplesCh <- cpuUsage + + ticker := time.NewTicker(collectionInterval) + + for { + select { + case <-ticker.C: + previousCpuUsage := cpuUsage + + cpuUsage.Get() + + select { + case samplesCh <- cpuUsage.Delta(previousCpuUsage): + default: + // Include default to avoid channel blocking + } + + case <-stopCh: + return + } + } + }() + + return samplesCh, stopCh +} + +func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) { + l := LoadAverage{} + err := l.Get() + return l, err +} + +func (c *ConcreteSigar) GetMem() (Mem, error) { + m := Mem{} + err := m.Get() + return m, err +} + +func (c *ConcreteSigar) GetSwap() (Swap, error) { + s := Swap{} + err := s.Get() + return s, err +} + +func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) { + f := FileSystemUsage{} + err := f.Get(path) + return f, err +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go b/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go new file mode 100644 index 00000000..e3a8c4b9 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go @@ -0,0 +1,467 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "syscall" + "time" + "unsafe" +) + +func (self *LoadAverage) Get() error { + avg := []C.double{0, 0, 0} + + C.getloadavg(&avg[0], C.int(len(avg))) + + self.One = float64(avg[0]) + self.Five = float64(avg[1]) + self.Fifteen = float64(avg[2]) + + return nil +} + +func (self *Uptime) Get() error { + tv := syscall.Timeval32{} + + if err := sysctlbyname("kern.boottime", &tv); err != nil { + return err + } + + self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds() + + return nil +} + +func (self *Mem) Get() error { + var vmstat C.vm_statistics_data_t + + if err := sysctlbyname("hw.memsize", &self.Total); err != nil { + return err + } + + if err := vm_info(&vmstat); err != nil { + return err + } + + kern := uint64(vmstat.inactive_count) << 12 + self.Free = uint64(vmstat.free_count) << 12 + + self.Used = self.Total - self.Free + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +type xsw_usage struct { + Total, Avail, Used uint64 +} + +func (self *Swap) Get() error { + sw_usage := xsw_usage{} + + if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil { + return err + } + + self.Total = sw_usage.Total + self.Used = sw_usage.Used + self.Free = sw_usage.Avail + + return nil +} + +func (self *Cpu) Get() error { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpuload C.host_cpu_load_info_data_t + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics error=%d", status) + } + + self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER]) + self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) + self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) + self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) + + return nil +} + +func (self *CpuList) Get() error { + var count C.mach_msg_type_number_t + var cpuload *C.processor_cpu_load_info_data_t + var ncpu C.natural_t + + status := C.host_processor_info(C.host_t(C.mach_host_self()), + C.PROCESSOR_CPU_LOAD_INFO, + &ncpu, + (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), + &count) 
+ + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) + + bbuf := bytes.NewBuffer(buf) + + self.List = make([]Cpu, 0, ncpu) + + for i := 0; i < int(ncpu); i++ { + cpu := Cpu{} + + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return err + } + + cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER]) + cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM]) + cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE]) + cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE]) + + self.List = append(self.List, cpu) + } + + return nil +} + +func (self *FileSystemList) Get() error { + num, err := getfsstat(nil, C.MNT_NOWAIT) + if num < 0 { + return err + } + + buf := make([]syscall.Statfs_t, num) + + num, err = getfsstat(buf, C.MNT_NOWAIT) + if err != nil { + return err + } + + fslist := make([]FileSystem, 0, num) + + for i := 0; i < num; i++ { + fs := FileSystem{} + + fs.DirName = bytePtrToString(&buf[i].Mntonname[0]) + fs.DevName = bytePtrToString(&buf[i].Mntfromname[0]) + fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0]) + + fslist = append(fslist, fs) + } + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0) + if n <= 0 { + return syscall.EINVAL + } + buf := make([]byte, n) + n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n) + if n <= 0 { + return syscall.ENOMEM + } + + var pid int32 + num := int(n) / binary.Size(pid) + list := make([]int, 0, num) + bbuf := bytes.NewBuffer(buf) + + for i := 0; i < num; i++ { + if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil { + return err + } + if pid == 0 { + continue + } + + list = append(list, int(pid)) + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Name = C.GoString(&info.pbsd.pbi_comm[0]) + + switch info.pbsd.pbi_status { + case C.SIDL: + self.State = RunStateIdle + case C.SRUN: + self.State = RunStateRun + case C.SSLEEP: + self.State = RunStateSleep + case C.SSTOP: + self.State = RunStateStop + case C.SZOMB: + self.State = RunStateZombie + default: + self.State = RunStateUnknown + } + + self.Ppid = int(info.pbsd.pbi_ppid) + + self.Tty = int(info.pbsd.e_tdev) + + self.Priority = int(info.ptinfo.pti_priority) + + self.Nice = int(info.pbsd.pbi_nice) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Size = uint64(info.ptinfo.pti_virtual_size) + self.Resident = uint64(info.ptinfo.pti_resident_size) + self.PageFaults = uint64(info.ptinfo.pti_faults) + + return nil +} + +func (self *ProcTime) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.User = + 
uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond) + + self.Sys = + uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond) + + self.Total = self.User + self.Sys + + self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) + + (uint64(info.pbsd.pbi_start_tvusec) / 1000) + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + var args []string + + argv := func(arg string) { + args = append(args, arg) + } + + err := kern_procargs(pid, nil, argv, nil) + + self.List = args + + return err +} + +func (self *ProcExe) Get(pid int) error { + exe := func(arg string) { + self.Name = arg + } + + return kern_procargs(pid, exe, nil, nil) +} + +// wrapper around sysctl KERN_PROCARGS2 +// callbacks params are optional, +// up to the caller as to which pieces of data they want +func kern_procargs(pid int, + exe func(string), + argv func(string), + env func(string, string)) error { + + mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} + argmax := uintptr(C.ARG_MAX) + buf := make([]byte, argmax) + err := sysctl(mib, &buf[0], &argmax, nil, 0) + if err != nil { + return nil + } + + bbuf := bytes.NewBuffer(buf) + bbuf.Truncate(int(argmax)) + + var argc int32 + binary.Read(bbuf, binary.LittleEndian, &argc) + + path, err := bbuf.ReadBytes(0) + if exe != nil { + exe(string(chop(path))) + } + + // skip trailing \0's + for { + c, _ := bbuf.ReadByte() + if c != 0 { + bbuf.UnreadByte() + break // start of argv[0] + } + } + + for i := 0; i < int(argc); i++ { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + if argv != nil { + argv(string(chop(arg))) + } + } + + if env == nil { + return nil + } + + delim := []byte{61} // "=" + + for { + line, err := bbuf.ReadBytes(0) + if err == io.EOF || line[0] == 0 { + break + } + pair := bytes.SplitN(chop(line), delim, 2) + env(string(pair[0]), string(pair[1])) + } + + return nil +} + +// XXX copied from zsyscall_darwin_amd64.go +func sysctl(mib []C.int, old *byte, oldlen *uintptr, + new *byte, newlen uintptr) (err error) { + var p0 unsafe.Pointer + p0 = unsafe.Pointer(&mib[0]) + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = e1 + } + return +} + +func vm_info(vmstat *C.vm_statistics_data_t) error { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT + + status := C.host_statistics( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO, + C.host_info_t(unsafe.Pointer(vmstat)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics=%d", status) + } + + return nil +} + +// generic Sysctl buffer unmarshalling +func sysctlbyname(name string, data interface{}) (err error) { + val, err := syscall.Sysctl(name) + if err != nil { + return err + } + + buf := []byte(val) + + switch v := data.(type) { + case *uint64: + *v = *(*uint64)(unsafe.Pointer(&buf[0])) + return + } + + bbuf := bytes.NewBuffer([]byte(val)) + return binary.Read(bbuf, binary.LittleEndian, data) +} + +// syscall.Getfsstat() wrapper is broken, roll our own to workaround. 
+func getfsstat(buf []syscall.Statfs_t, flags int) (n int, err error) { + var ptr uintptr + var size uintptr + + if len(buf) > 0 { + ptr = uintptr(unsafe.Pointer(&buf[0])) + size = unsafe.Sizeof(buf[0]) * uintptr(len(buf)) + } else { + ptr = uintptr(0) + size = uintptr(0) + } + + trap := uintptr(syscall.SYS_GETFSSTAT64) + ret, _, errno := syscall.Syscall(trap, ptr, size, uintptr(flags)) + + n = int(ret) + if errno != 0 { + err = errno + } + + return +} + +func task_info(pid int, info *C.struct_proc_taskallinfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if n != size { + return syscall.ENOMEM + } + + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_format.go b/vendor/github.com/cloudfoundry/gosigar/sigar_format.go new file mode 100644 index 00000000..d80a64e8 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_format.go @@ -0,0 +1,126 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "time" +) + +// Go version of apr_strfsize +func FormatSize(size uint64) string { + ord := []string{"K", "M", "G", "T", "P", "E"} + o := 0 + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + + if size < 973 { + fmt.Fprintf(w, "%3d ", size) + w.Flush() + return buf.String() + } + + for { + remain := size & 1023 + size >>= 10 + + if size >= 973 { + o++ + continue + } + + if size < 9 || (size == 9 && remain < 973) { + remain = ((remain * 5) + 256) / 512 + if remain >= 10 { + size++ + remain = 0 + } + + fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o]) + break + } + + if remain >= 512 { + size++ + } + + fmt.Fprintf(w, "%3d%s", size, ord[o]) + break + } + + w.Flush() + return buf.String() +} + +func FormatPercent(percent float64) string { + return strconv.FormatFloat(percent, 'f', -1, 64) + "%" +} + +func (self *FileSystemUsage) UsePercent() float64 { + b_used := (self.Total - self.Free) / 1024 + b_avail := self.Avail / 1024 + utotal := b_used + b_avail + used := b_used + + if utotal != 0 { + u100 := used * 100 + pct := u100 / utotal + if u100%utotal != 0 { + pct += 1 + } + return (float64(pct) / float64(100)) * 100.0 + } + + return 0.0 +} + +func (self *Uptime) Format() string { + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + uptime := uint64(self.Length) + + days := uptime / (60 * 60 * 24) + + if days != 0 { + s := "" + if days > 1 { + s = "s" + } + fmt.Fprintf(w, "%d day%s, ", days, s) + } + + minutes := uptime / 60 + hours := minutes / 60 + hours %= 24 + minutes %= 60 + + fmt.Fprintf(w, "%2d:%02d", hours, minutes) + + w.Flush() + return buf.String() +} + +func (self *ProcTime) FormatStartTime() string { + if self.StartTime == 0 { + return "00:00" + } + start := time.Unix(int64(self.StartTime)/1000, 0) + format := "Jan02" + if time.Since(start).Seconds() < (60 * 60 * 24) { + format = "15:04" + } + return start.Format(format) +} + +func (self *ProcTime) FormatTotal() string { + t := self.Total / 1000 + ss := t % 60 + t /= 60 + mm := t % 60 + t /= 60 + hh := t % 24 + return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss) +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go b/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go new file mode 100644 index 00000000..dd72a76b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go @@ -0,0 +1,141 @@ +package sigar + +import ( + "time" +) + +type Sigar interface { + CollectCpuStats(collectionInterval time.Duration) 
(<-chan Cpu, chan<- struct{}) + GetLoadAverage() (LoadAverage, error) + GetMem() (Mem, error) + GetSwap() (Swap, error) + GetFileSystemUsage(string) (FileSystemUsage, error) +} + +type Cpu struct { + User uint64 + Nice uint64 + Sys uint64 + Idle uint64 + Wait uint64 + Irq uint64 + SoftIrq uint64 + Stolen uint64 +} + +func (cpu *Cpu) Total() uint64 { + return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle + + cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen +} + +func (cpu Cpu) Delta(other Cpu) Cpu { + return Cpu{ + User: cpu.User - other.User, + Nice: cpu.Nice - other.Nice, + Sys: cpu.Sys - other.Sys, + Idle: cpu.Idle - other.Idle, + Wait: cpu.Wait - other.Wait, + Irq: cpu.Irq - other.Irq, + SoftIrq: cpu.SoftIrq - other.SoftIrq, + Stolen: cpu.Stolen - other.Stolen, + } +} + +type LoadAverage struct { + One, Five, Fifteen float64 +} + +type Uptime struct { + Length float64 +} + +type Mem struct { + Total uint64 + Used uint64 + Free uint64 + ActualFree uint64 + ActualUsed uint64 +} + +type Swap struct { + Total uint64 + Used uint64 + Free uint64 +} + +type CpuList struct { + List []Cpu +} + +type FileSystem struct { + DirName string + DevName string + TypeName string + SysTypeName string + Options string + Flags uint32 +} + +type FileSystemList struct { + List []FileSystem +} + +type FileSystemUsage struct { + Total uint64 + Used uint64 + Free uint64 + Avail uint64 + Files uint64 + FreeFiles uint64 +} + +type ProcList struct { + List []int +} + +type RunState byte + +const ( + RunStateSleep = 'S' + RunStateRun = 'R' + RunStateStop = 'T' + RunStateZombie = 'Z' + RunStateIdle = 'D' + RunStateUnknown = '?' +) + +type ProcState struct { + Name string + State RunState + Ppid int + Tty int + Priority int + Nice int + Processor int +} + +type ProcMem struct { + Size uint64 + Resident uint64 + Share uint64 + MinorFaults uint64 + MajorFaults uint64 + PageFaults uint64 +} + +type ProcTime struct { + StartTime uint64 + User uint64 + Sys uint64 + Total uint64 +} + +type ProcArgs struct { + List []string +} + +type ProcExe struct { + Name string + Cwd string + Root string +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go b/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go new file mode 100644 index 00000000..68ffb0f9 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go @@ -0,0 +1,386 @@ +// Copyright (c) 2012 VMware, Inc. 
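+//
+// The Linux implementation reads everything from the proc filesystem (rooted
+// at the Procd variable below) plus /etc/mtab for the filesystem list; unlike
+// the darwin port it needs no cgo.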
+ +package sigar + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "syscall" +) + +var system struct { + ticks uint64 + btime uint64 +} + +var Procd string + +func init() { + system.ticks = 100 // C.sysconf(C._SC_CLK_TCK) + + Procd = "/proc" + + // grab system boot time + readFile(Procd+"/stat", func(line string) bool { + if strings.HasPrefix(line, "btime") { + system.btime, _ = strtoull(line[6:]) + return false // stop reading + } + return true + }) +} + +func (self *LoadAverage) Get() error { + line, err := ioutil.ReadFile(Procd + "/loadavg") + if err != nil { + return nil + } + + fields := strings.Fields(string(line)) + + self.One, _ = strconv.ParseFloat(fields[0], 64) + self.Five, _ = strconv.ParseFloat(fields[1], 64) + self.Fifteen, _ = strconv.ParseFloat(fields[2], 64) + + return nil +} + +func (self *Uptime) Get() error { + sysinfo := syscall.Sysinfo_t{} + + if err := syscall.Sysinfo(&sysinfo); err != nil { + return err + } + + self.Length = float64(sysinfo.Uptime) + + return nil +} + +func (self *Mem) Get() error { + var buffers, cached uint64 + table := map[string]*uint64{ + "MemTotal": &self.Total, + "MemFree": &self.Free, + "Buffers": &buffers, + "Cached": &cached, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + kern := buffers + cached + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +func (self *Swap) Get() error { + table := map[string]*uint64{ + "SwapTotal": &self.Total, + "SwapFree": &self.Free, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + return nil +} + +func (self *Cpu) Get() error { + return readFile(Procd+"/stat", func(line string) bool { + if len(line) > 4 && line[0:4] == "cpu " { + parseCpuStat(self, line) + return false + } + return true + + }) +} + +func (self *CpuList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 4 + } + list := make([]Cpu, 0, capacity) + + err := readFile(Procd+"/stat", func(line string) bool { + if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' { + cpu := Cpu{} + parseCpuStat(&cpu, line) + list = append(list, cpu) + } + return true + }) + + self.List = list + + return err +} + +func (self *FileSystemList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 10 + } + fslist := make([]FileSystem, 0, capacity) + + err := readFile("/etc/mtab", func(line string) bool { + fields := strings.Fields(line) + + fs := FileSystem{} + fs.DevName = fields[0] + fs.DirName = fields[1] + fs.SysTypeName = fields[2] + fs.Options = fields[3] + + fslist = append(fslist, fs) + + return true + }) + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + dir, err := os.Open(Procd) + if err != nil { + return err + } + defer dir.Close() + + const readAllDirnames = -1 // see os.File.Readdirnames doc + + names, err := dir.Readdirnames(readAllDirnames) + if err != nil { + return err + } + + capacity := len(names) + list := make([]int, 0, capacity) + + for _, name := range names { + if name[0] < '0' || name[0] > '9' { + continue + } + pid, err := strconv.Atoi(name) + if err == nil { + list = append(list, pid) + } + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + self.Name = fields[1][1 : len(fields[1])-1] // strip ()'s 
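+
+	// Field indexes below follow the /proc/[pid]/stat layout in proc(5):
+	// state is man-page field 3, ppid 4, tty_nr 7, priority 18, nice 19 and
+	// processor 39 (man numbering is 1-based; this fields slice is 0-based).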
+ + self.State = RunState(fields[2][0]) + + self.Ppid, _ = strconv.Atoi(fields[3]) + + self.Tty, _ = strconv.Atoi(fields[6]) + + self.Priority, _ = strconv.Atoi(fields[17]) + + self.Nice, _ = strconv.Atoi(fields[18]) + + self.Processor, _ = strconv.Atoi(fields[38]) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + contents, err := readProcFile(pid, "statm") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + size, _ := strtoull(fields[0]) + self.Size = size << 12 + + rss, _ := strtoull(fields[1]) + self.Resident = rss << 12 + + share, _ := strtoull(fields[2]) + self.Share = share << 12 + + contents, err = readProcFile(pid, "stat") + if err != nil { + return err + } + + fields = strings.Fields(string(contents)) + + self.MinorFaults, _ = strtoull(fields[10]) + self.MajorFaults, _ = strtoull(fields[12]) + self.PageFaults = self.MinorFaults + self.MajorFaults + + return nil +} + +func (self *ProcTime) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + user, _ := strtoull(fields[13]) + sys, _ := strtoull(fields[14]) + // convert to millis + self.User = user * (1000 / system.ticks) + self.Sys = sys * (1000 / system.ticks) + self.Total = self.User + self.Sys + + // convert to millis + self.StartTime, _ = strtoull(fields[21]) + self.StartTime /= system.ticks + self.StartTime += system.btime + self.StartTime *= 1000 + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + contents, err := readProcFile(pid, "cmdline") + if err != nil { + return err + } + + bbuf := bytes.NewBuffer(contents) + + var args []string + + for { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + args = append(args, string(chop(arg))) + } + + self.List = args + + return nil +} + +func (self *ProcExe) Get(pid int) error { + fields := map[string]*string{ + "exe": &self.Name, + "cwd": &self.Cwd, + "root": &self.Root, + } + + for name, field := range fields { + val, err := os.Readlink(procFileName(pid, name)) + + if err != nil { + return err + } + + *field = val + } + + return nil +} + +func parseMeminfo(table map[string]*uint64) error { + return readFile(Procd+"/meminfo", func(line string) bool { + fields := strings.Split(line, ":") + + if ptr := table[fields[0]]; ptr != nil { + num := strings.TrimLeft(fields[1], " ") + val, err := strtoull(strings.Fields(num)[0]) + if err == nil { + *ptr = val * 1024 + } + } + + return true + }) +} + +func parseCpuStat(self *Cpu, line string) error { + fields := strings.Fields(line) + + self.User, _ = strtoull(fields[1]) + self.Nice, _ = strtoull(fields[2]) + self.Sys, _ = strtoull(fields[3]) + self.Idle, _ = strtoull(fields[4]) + self.Wait, _ = strtoull(fields[5]) + self.Irq, _ = strtoull(fields[6]) + self.SoftIrq, _ = strtoull(fields[7]) + self.Stolen, _ = strtoull(fields[8]) + + return nil +} + +func readFile(file string, handler func(string) bool) error { + contents, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + + for { + line, _, err := reader.ReadLine() + if err == io.EOF { + break + } + if !handler(string(line)) { + break + } + } + + return nil +} + +func strtoull(val string) (uint64, error) { + return strconv.ParseUint(val, 10, 64) +} + +func procFileName(pid int, name string) string { + return Procd + "/" + strconv.Itoa(pid) + "/" + name +} + +func readProcFile(pid int, name string) ([]byte, error) { + path := procFileName(pid, name) + 
contents, err := ioutil.ReadFile(path) + + if err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err == syscall.ENOENT { + return nil, syscall.ESRCH + } + } + } + + return contents, err +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go b/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go new file mode 100644 index 00000000..39f18784 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go @@ -0,0 +1,26 @@ +// Copyright (c) 2012 VMware, Inc. + +// +build darwin freebsd linux netbsd openbsd + +package sigar + +import "syscall" + +func (self *FileSystemUsage) Get(path string) error { + stat := syscall.Statfs_t{} + err := syscall.Statfs(path, &stat) + if err != nil { + return err + } + + bsize := stat.Bsize / 512 + + self.Total = (uint64(stat.Blocks) * uint64(bsize)) >> 1 + self.Free = (uint64(stat.Bfree) * uint64(bsize)) >> 1 + self.Avail = (uint64(stat.Bavail) * uint64(bsize)) >> 1 + self.Used = self.Total - self.Free + self.Files = stat.Files + self.FreeFiles = stat.Ffree + + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_util.go b/vendor/github.com/cloudfoundry/gosigar/sigar_util.go new file mode 100644 index 00000000..a02df941 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_util.go @@ -0,0 +1,22 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "unsafe" +) + +func bytePtrToString(ptr *int8) string { + bytes := (*[10000]byte)(unsafe.Pointer(ptr)) + + n := 0 + for bytes[n] != 0 { + n++ + } + + return string(bytes[0:n]) +} + +func chop(buf []byte) []byte { + return buf[0 : len(buf)-1] +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go b/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go new file mode 100644 index 00000000..0c779d7c --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012 VMware, Inc. 
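A minimal consumption sketch for the gosigar API vendored above; it assumes the exported metric structs (`Mem`, `LoadAverage`, `FileSystemUsage`) that the package declares elsewhere:

``` go
package main

import (
	"fmt"

	sigar "github.com/cloudfoundry/gosigar"
)

func main() {
	// Each metric is a struct whose Get() populates it from the
	// platform implementation (the /proc parsers above on Linux).
	mem := sigar.Mem{}
	if err := mem.Get(); err == nil {
		fmt.Printf("mem: total=%d used=%d\n", mem.Total, mem.Used)
	}

	avg := sigar.LoadAverage{}
	if err := avg.Get(); err == nil {
		fmt.Printf("load: %.2f %.2f %.2f\n", avg.One, avg.Five, avg.Fifteen)
	}

	fs := sigar.FileSystemUsage{}
	if err := fs.Get("/"); err == nil {
		fmt.Printf("/: total=%dK avail=%dK\n", fs.Total, fs.Avail)
	}
}
```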
+ +package sigar + +// #include <stdlib.h> + // #include <windows.h> +import "C" + +import ( + "fmt" + "unsafe" +) + +func init() { +} + +func (self *LoadAverage) Get() error { + return nil +} + +func (self *Uptime) Get() error { + return nil +} + +func (self *Mem) Get() error { + var statex C.MEMORYSTATUSEX + statex.dwLength = C.DWORD(unsafe.Sizeof(statex)) + + succeeded := C.GlobalMemoryStatusEx(&statex) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GlobalMemoryStatusEx failed with error: %d", int(lastError)) + } + + self.Total = uint64(statex.ullTotalPhys) + return nil +} + +func (self *Swap) Get() error { + return notImplemented() +} + +func (self *Cpu) Get() error { + return notImplemented() +} + +func (self *CpuList) Get() error { + return notImplemented() +} + +func (self *FileSystemList) Get() error { + return notImplemented() +} + +func (self *ProcList) Get() error { + return notImplemented() +} + +func (self *ProcState) Get(pid int) error { + return notImplemented() +} + +func (self *ProcMem) Get(pid int) error { + return notImplemented() +} + +func (self *ProcTime) Get(pid int) error { + return notImplemented() +} + +func (self *ProcArgs) Get(pid int) error { + return notImplemented() +} + +func (self *ProcExe) Get(pid int) error { + return notImplemented() +} + +func (self *FileSystemUsage) Get(path string) error { + var availableBytes C.ULARGE_INTEGER + var totalBytes C.ULARGE_INTEGER + var totalFreeBytes C.ULARGE_INTEGER + + pathChars := C.CString(path) + defer C.free(unsafe.Pointer(pathChars)) + + succeeded := C.GetDiskFreeSpaceEx((*C.CHAR)(pathChars), &availableBytes, &totalBytes, &totalFreeBytes) + if succeeded == C.FALSE { + lastError := C.GetLastError() + return fmt.Errorf("GetDiskFreeSpaceEx failed with error: %d", int(lastError)) + } + + self.Total = *(*uint64)(unsafe.Pointer(&totalBytes)) + return nil +} + +func notImplemented() error { + panic("Not Implemented") + return nil +} diff --git a/vendor/github.com/codegangsta/cli/.gitignore b/vendor/github.com/codegangsta/cli/.gitignore new file mode 100644 index 00000000..78237789 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/.gitignore @@ -0,0 +1 @@ +*.coverprofile diff --git a/vendor/github.com/codegangsta/cli/.travis.yml b/vendor/github.com/codegangsta/cli/.travis.yml index 133722f0..657e96a9 100644 --- a/vendor/github.com/codegangsta/cli/.travis.yml +++ b/vendor/github.com/codegangsta/cli/.travis.yml @@ -1,22 +1,32 @@ language: go + sudo: false go: -- 1.1.2 - 1.2.2 - 1.3.3 -- 1.4.2 -- 1.5.1 -- tip +- 1.4 +- 1.5.4 +- 1.6.2 +- master matrix: allow_failures: - - go: tip + - go: master + include: + - go: 1.6.2 + os: osx + - go: 1.1.2 + install: go get -v . + before_script: echo skipping gfmxr on $TRAVIS_GO_VERSION + script: + - ./runtests vet + - ./runtests test before_script: -- go get github.com/meatballhat/gfmxr/... +- go get github.com/urfave/gfmxr/... script: -- go vet ./... -- go test -v ./...
-- gfmxr -c $(grep -c 'package main' README.md) -s README.md +- ./runtests vet +- ./runtests test +- ./runtests gfmxr diff --git a/vendor/github.com/codegangsta/cli/CHANGELOG.md b/vendor/github.com/codegangsta/cli/CHANGELOG.md index d7ea0a69..0d51d2a5 100644 --- a/vendor/github.com/codegangsta/cli/CHANGELOG.md +++ b/vendor/github.com/codegangsta/cli/CHANGELOG.md @@ -4,15 +4,33 @@ ## [Unreleased] ### Added +- `./runtests` test runner with coverage tracking by default +- testing on OS X +- testing on Windows + +### Fixed +- Printing of command aliases in help text + +## [1.17.0] - 2016-05-09 ### Added - Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc` +- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool` +- Support for hiding commands by setting `Hidden: true` -- this will hide the + commands in help output ### Changed - `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer -quoted in help text output. + quoted in help text output. - All flag types now include `(default: {value})` strings following usage when a -default value can be (reasonably) detected. + default value can be (reasonably) detected. - `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent -with non-slice flag types + with non-slice flag types +- Apps now exit with a code of 3 if an unknown subcommand is specified + (previously they printed "No help topic for...", but still exited 0). This + makes it easier to script around apps built using `cli` since they can trust + that a 0 exit code indicates a successful execution. +- cleanups based on [Go Report Card + feedback](https://goreportcard.com/report/github.com/urfave/cli) ## [1.16.0] - 2016-05-02 ### Added @@ -272,27 +290,28 @@ signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`. ### Added - Initial implementation.
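The `Hidden: true` support and the exit-code-3 behavior noted above combine as in this sketch (command names are illustrative):

``` go
package main

import (
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{Name: "serve", Usage: "start serving", Action: func(c *cli.Context) error { return nil }},
		// Hidden commands still run, but are omitted from help output
		// and bash completion.
		{Name: "debug-dump", Hidden: true, Action: func(c *cli.Context) error { return nil }},
	}
	// An unknown subcommand now exits with code 3 rather than 0, so
	// shell scripts can rely on the exit status.
	app.Run(os.Args)
}
```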
-[Unreleased]: https://github.com/codegangsta/cli/compare/v1.16.0...HEAD -[1.16.0]: https://github.com/codegangsta/cli/compare/v1.15.0...v1.16.0 -[1.15.0]: https://github.com/codegangsta/cli/compare/v1.14.0...v1.15.0 -[1.14.0]: https://github.com/codegangsta/cli/compare/v1.13.0...v1.14.0 -[1.13.0]: https://github.com/codegangsta/cli/compare/v1.12.0...v1.13.0 -[1.12.0]: https://github.com/codegangsta/cli/compare/v1.11.1...v1.12.0 -[1.11.1]: https://github.com/codegangsta/cli/compare/v1.11.0...v1.11.1 -[1.11.0]: https://github.com/codegangsta/cli/compare/v1.10.2...v1.11.0 -[1.10.2]: https://github.com/codegangsta/cli/compare/v1.10.1...v1.10.2 -[1.10.1]: https://github.com/codegangsta/cli/compare/v1.10.0...v1.10.1 -[1.10.0]: https://github.com/codegangsta/cli/compare/v1.9.0...v1.10.0 -[1.9.0]: https://github.com/codegangsta/cli/compare/v1.8.0...v1.9.0 -[1.8.0]: https://github.com/codegangsta/cli/compare/v1.7.1...v1.8.0 -[1.7.1]: https://github.com/codegangsta/cli/compare/v1.7.0...v1.7.1 -[1.7.0]: https://github.com/codegangsta/cli/compare/v1.6.0...v1.7.0 -[1.6.0]: https://github.com/codegangsta/cli/compare/v1.5.0...v1.6.0 -[1.5.0]: https://github.com/codegangsta/cli/compare/v1.4.1...v1.5.0 -[1.4.1]: https://github.com/codegangsta/cli/compare/v1.4.0...v1.4.1 -[1.4.0]: https://github.com/codegangsta/cli/compare/v1.3.1...v1.4.0 -[1.3.1]: https://github.com/codegangsta/cli/compare/v1.3.0...v1.3.1 -[1.3.0]: https://github.com/codegangsta/cli/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/codegangsta/cli/compare/v1.1.0...v1.2.0 -[1.1.0]: https://github.com/codegangsta/cli/compare/v1.0.0...v1.1.0 -[1.0.0]: https://github.com/codegangsta/cli/compare/v0.1.0...v1.0.0 +[Unreleased]: https://github.com/urfave/cli/compare/v1.17.0...HEAD +[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0 +[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0 +[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0 +[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0 +[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0 +[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0 +[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1 +[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0 +[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2 +[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1 +[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0 +[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0 +[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0 +[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1 +[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0 +[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0 +[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0 +[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0 diff --git a/vendor/github.com/codegangsta/cli/README.md b/vendor/github.com/codegangsta/cli/README.md index 06f7b844..80337c56 100644 --- a/vendor/github.com/codegangsta/cli/README.md +++ 
b/vendor/github.com/codegangsta/cli/README.md @@ -1,10 +1,18 @@ -[![Coverage](http://gocover.io/_badge/github.com/codegangsta/cli?0)](http://gocover.io/github.com/codegangsta/cli) -[![Build Status](https://travis-ci.org/codegangsta/cli.svg?branch=master)](https://travis-ci.org/codegangsta/cli) -[![GoDoc](https://godoc.org/github.com/codegangsta/cli?status.svg)](https://godoc.org/github.com/codegangsta/cli) -[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-codegangsta-cli) +[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) +[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/meatballhat/cli) +[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) / +[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc) + # cli +**Notice:** This is the library formerly known as +`github.com/codegangsta/cli` -- Github will automatically redirect requests +to this repository, but we recommend updating your references for clarity. + cli is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. ## Overview @@ -15,11 +23,14 @@ Command line apps are usually so tiny that there is absolutely no reason why you ## Installation -Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html). +Make sure you have a working Go environment. Go version 1.1+ is required for +core cli, whereas use of the [`./altsrc`](./altsrc) input extensions requires Go +version 1.2+. [See the install +instructions](http://golang.org/doc/install.html). To install cli, simply run: ``` -$ go get github.com/codegangsta/cli +$ go get github.com/urfave/cli ``` Make sure your `PATH` includes to the `$GOPATH/bin` directory so your commands can be easily used: @@ -27,6 +38,52 @@ Make sure your `PATH` includes to the `$GOPATH/bin` directory so your commands c export PATH=$PATH:$GOPATH/bin ``` +### Supported platforms + +cli is tested against multiple versions of Go on Linux, and against the latest +released version of Go on OS X and Windows. For full details, see +[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). + +### Using the `v2` branch + +There is currently a long-lived branch named `v2` that is intended to land as +the new `master` branch once development there has settled down. The current +`master` branch (mirrored as `v1`) is being manually merged into `v2` on +an irregular human-based schedule, but generally if one wants to "upgrade" to +`v2` *now* and accept the volatility (read: "awesomeness") that comes along with +that, please use whatever version pinning of your preference, such as via +`gopkg.in`: + +``` +$ go get gopkg.in/urfave/cli.v2 +``` + +``` go +... 
+import ( + "gopkg.in/urfave/cli.v2" // imports as package "cli" +) +... +``` + +### Pinning to the `v1` branch + +Similarly to the section above describing use of the `v2` branch, if one wants +to avoid any unexpected compatibility pains once `v2` becomes `master`, then +pinning to the `v1` branch is an acceptable option, e.g.: + +``` +$ go get gopkg.in/urfave/cli.v1 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v1" // imports as package "cli" +) +... +``` + ## Getting Started One of the philosophies behind cli is that an API should be playful and full of discovery. So a cli app can be as little as one line of code in `main()`. @@ -36,7 +93,7 @@ package main import ( "os" - "github.com/codegangsta/cli" + "github.com/urfave/cli" ) func main() { @@ -46,6 +103,9 @@ func main() { This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation: + ``` go package main @@ -53,7 +113,7 @@ import ( "fmt" "os" - "github.com/codegangsta/cli" + "github.com/urfave/cli" ) func main() { @@ -77,6 +137,9 @@ Being a programmer can be a lonely job. Thankfully by the power of automation th Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it: + ``` go package main @@ -84,7 +147,7 @@ import ( "fmt" "os" - "github.com/codegangsta/cli" + "github.com/urfave/cli" ) func main() { @@ -202,7 +265,7 @@ app.Action = func(c *cli.Context) error { ... ``` -See full list of flags at http://godoc.org/github.com/codegangsta/cli +See full list of flags at http://godoc.org/github.com/urfave/cli #### Placeholder Values @@ -210,6 +273,7 @@ Sometimes it's useful to specify a flag's value within the usage string itself. indicated with back quotes. For example this: + ```go cli.StringFlag{ Name: "config, c", @@ -416,7 +480,7 @@ package main import ( "os" - "github.com/codegangsta/cli" + "github.com/urfave/cli" ) func main() { @@ -504,6 +568,9 @@ templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and is possible by assigning a compatible func to the `cli.HelpPrinter` variable, e.g.: + ``` go package main @@ -512,7 +579,7 @@ import ( "io" "os" - "github.com/codegangsta/cli" + "github.com/urfave/cli" ) func main() { @@ -561,8 +628,16 @@ VERSION: ## Contribution Guidelines -Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch. +Feel free to put up a pull request to fix a bug or maybe add a feature. I will +give it a code review and make sure that it does not break backwards +compatibility. If I or any other collaborators agree that it is in line with +the vision of the project, we will work with you to get the code into +a mergeable state and merge it into the master branch. -If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together. +If you have contributed something significant to the project, we will most +likely add you as a collaborator. 
As a collaborator you are given the ability +to merge others pull requests. It is very important that new code does not +break existing code, so be careful about what code you do choose to merge. -If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out. +If you feel like you have contributed to the project but have not yet been +added as a collaborator, we probably forgot to add you, please open an issue. diff --git a/vendor/github.com/codegangsta/cli/app.go b/vendor/github.com/codegangsta/cli/app.go index 89c741bf..1e33cdc7 100644 --- a/vendor/github.com/codegangsta/cli/app.go +++ b/vendor/github.com/codegangsta/cli/app.go @@ -12,7 +12,7 @@ import ( ) var ( - changeLogURL = "https://github.com/codegangsta/cli/blob/master/CHANGELOG.md" + changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) @@ -51,7 +51,7 @@ type App struct { HideHelp bool // Boolean to hide built-in version flag and the VERSION section of help HideVersion bool - // Populate on app startup, only gettable throught method Categories() + // Populate on app startup, only gettable through method Categories() categories CommandCategories // An action to execute when the bash-completion flag is set BashComplete BashCompleteFunc @@ -82,8 +82,12 @@ type App struct { Email string // Writer writer to write output to Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer // Other custom info Metadata map[string]interface{} + + didSetup bool } // Tries to find out when this binary was compiled. @@ -96,7 +100,8 @@ func compileTime() time.Time { return info.ModTime() } -// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action. +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. func NewApp() *App { return &App{ Name: filepath.Base(os.Args[0]), @@ -111,8 +116,16 @@ func NewApp() *App { } } -// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination -func (a *App) Run(arguments []string) (err error) { +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. +func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + if a.Author != "" || a.Email != "" { a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) } @@ -148,6 +161,12 @@ func (a *App) Run(arguments []string) (err error) { if !a.HideVersion { a.appendFlag(VersionFlag) } +} + +// Run is the entry point to the cli app. 
Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() // parse flags set := flagSet(a.Name, a.Flags) @@ -170,11 +189,10 @@ func (a *App) Run(arguments []string) (err error) { err := a.OnUsageError(context, err, false) HandleExitCoder(err) return err - } else { - fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") - ShowAppHelp(context) - return err } + fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") + ShowAppHelp(context) + return err } if !a.HideHelp && checkHelp(context) { @@ -228,16 +246,14 @@ func (a *App) Run(arguments []string) (err error) { // DEPRECATED: Another entry point to the cli app, takes care of passing arguments and error handling func (a *App) RunAndExitOnError() { - fmt.Fprintf(os.Stderr, - "DEPRECATED cli.App.RunAndExitOnError. %s See %s\n", - contactSysadmin, runAndExitOnErrorDeprecationURL) if err := a.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(a.errWriter(), err) OsExiter(1) } } -// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags func (a *App) RunAsSubcommand(ctx *Context) (err error) { // append help to commands if len(a.Commands) > 0 { @@ -290,11 +306,10 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { err = a.OnUsageError(context, err, true) HandleExitCoder(err) return err - } else { - fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") - ShowSubcommandHelp(context) - return err } + fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") + ShowSubcommandHelp(context) + return err } if len(a.Commands) > 0 { @@ -346,7 +361,7 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { return err } -// Returns the named command on App. Returns nil if the command does not exist +// Command returns the named command on App. Returns nil if the command does not exist func (a *App) Command(name string) *Command { for _, c := range a.Commands { if c.HasName(name) { @@ -357,11 +372,41 @@ func (a *App) Command(name string) *Command { return nil } -// Returnes the array containing all the categories with the commands they contain +// Categories returns a slice containing all the categories with the commands they contain func (a *App) Categories() CommandCategories { return a.categories } +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []*CommandCategory { + ret := []*CommandCategory{} + for _, category := range a.categories { + if visible := func() *CommandCategory { + for _, command := range category.Commands { + if !command.Hidden { + return category + } + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []Command { + ret := []Command{} + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + // VisibleFlags returns a slice of the Flags with Hidden=false func (a *App) VisibleFlags() []Flag { return visibleFlags(a.Flags) @@ -377,6 +422,16 @@ func (a *App) hasFlag(flag Flag) bool { return false } +func (a *App) errWriter() io.Writer { + + // When the app ErrWriter is nil use the package level one. 
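The `ErrWriter` plumbing introduced here can be pointed at any `io.Writer`, either per app or package-wide; a small sketch (the buffer capture is illustrative):

``` go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	var stderr bytes.Buffer

	app := cli.NewApp()
	app.ErrWriter = &stderr // per-app error output
	cli.ErrWriter = &stderr // package-level fallback, used when App.ErrWriter is nil

	app.Run(os.Args)
	fmt.Print(stderr.String()) // forward the captured error output as needed
}
```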
+ if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + func (a *App) appendFlag(flag Flag) { if !a.hasFlag(flag) { a.Flags = append(a.Flags, flag) @@ -422,9 +477,6 @@ func HandleAction(action interface{}, context *Context) (err error) { vals := reflect.ValueOf(action).Call([]reflect.Value{reflect.ValueOf(context)}) if len(vals) == 0 { - fmt.Fprintf(os.Stderr, - "DEPRECATED Action signature. Must be `cli.ActionFunc`. %s See %s\n", - contactSysadmin, appActionDeprecationURL) return nil } diff --git a/vendor/github.com/codegangsta/cli/appveyor.yml b/vendor/github.com/codegangsta/cli/appveyor.yml index 3ca7afab..173086e5 100644 --- a/vendor/github.com/codegangsta/cli/appveyor.yml +++ b/vendor/github.com/codegangsta/cli/appveyor.yml @@ -2,15 +2,24 @@ version: "{build}" os: Windows Server 2012 R2 +clone_folder: c:\gopath\src\github.com\urfave\cli + +environment: + GOPATH: C:\gopath + GOVERSION: 1.6 + PYTHON: C:\Python27-x64 + PYTHON_VERSION: 2.7.x + PYTHON_ARCH: 64 + GFMXR_DEBUG: 1 + install: - - go version - - go env +- set PATH=%GOPATH%\bin;C:\go\bin;%PATH% +- go version +- go env +- go get github.com/urfave/gfmxr/... +- go get -v -t ./... build_script: - - cd %APPVEYOR_BUILD_FOLDER% - - go vet ./... - - go test -v ./... - -test: off - -deploy: off +- python runtests vet +- python runtests test +- python runtests gfmxr diff --git a/vendor/github.com/codegangsta/cli/category.go b/vendor/github.com/codegangsta/cli/category.go index 7dbf2182..1a605502 100644 --- a/vendor/github.com/codegangsta/cli/category.go +++ b/vendor/github.com/codegangsta/cli/category.go @@ -1,7 +1,9 @@ package cli +// CommandCategories is a slice of *CommandCategory. type CommandCategories []*CommandCategory +// CommandCategory is a category containing commands. type CommandCategory struct { Name string Commands Commands @@ -19,6 +21,7 @@ func (c CommandCategories) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +// AddCommand adds a command to a category. func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { for _, commandCategory := range c { if commandCategory.Name == category { @@ -28,3 +31,14 @@ func (c CommandCategories) AddCommand(category string, command Command) CommandC } return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) } + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *CommandCategory) VisibleCommands() []Command { + ret := []Command{} + for _, command := range c.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/codegangsta/cli/command.go b/vendor/github.com/codegangsta/cli/command.go index 9ca7e518..8950ccae 100644 --- a/vendor/github.com/codegangsta/cli/command.go +++ b/vendor/github.com/codegangsta/cli/command.go @@ -48,13 +48,15 @@ type Command struct { SkipFlagParsing bool // Boolean to hide built-in help command HideHelp bool + // Boolean to hide this command from help or completion + Hidden bool // Full name of command for help, defaults to full command name, including parent commands. HelpName string commandNamePath []string } -// Returns the full name of the command. +// FullName returns the full name of the command. 
// For subcommands this ensures that parent commands are part of the command path func (c Command) FullName() string { if c.commandNamePath == nil { @@ -63,9 +65,10 @@ return strings.Join(c.commandNamePath, " ") } +// Commands is a slice of Command type Commands []Command -// Invokes the command given the context, parses ctx.Args() to generate command-specific flags +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags func (c Command) Run(ctx *Context) (err error) { if len(c.Subcommands) > 0 { return c.startApp(ctx) @@ -129,12 +132,11 @@ func (c Command) Run(ctx *Context) (err error) { err := c.OnUsageError(ctx, err, false) HandleExitCoder(err) return err - } else { - fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) - return err } + fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return err } nerr := normalizeFlags(c.Flags, set) @@ -189,6 +191,7 @@ return err } +// Names returns the names including short names and aliases. func (c Command) Names() []string { names := []string{c.Name} @@ -199,7 +202,7 @@ return append(names, c.Aliases...) } -// Returns true if Command.Name or Command.ShortName matches given name +// HasName returns true if Command.Name or Command.ShortName matches given name func (c Command) HasName(name string) bool { for _, n := range c.Names() { if n == name { diff --git a/vendor/github.com/codegangsta/cli/context.go b/vendor/github.com/codegangsta/cli/context.go index ef3d2fcc..c3424636 100644 --- a/vendor/github.com/codegangsta/cli/context.go +++ b/vendor/github.com/codegangsta/cli/context.go @@ -21,57 +21,62 @@ type Context struct { parentContext *Context } -// Creates a new context. For use in when invoking an App or Command action. +// NewContext creates a new context. For use when invoking an App or Command action.
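Because `NewContext` takes an explicit `*flag.FlagSet`, a context can be constructed directly, which is convenient in tests; a hypothetical sketch:

``` go
package main

import (
	"flag"
	"fmt"

	"github.com/urfave/cli"
)

func main() {
	// Build a flag set by hand instead of going through App.Run.
	set := flag.NewFlagSet("test", 0)
	set.String("name", "", "doc")
	set.Parse([]string{"--name", "demo", "positional"})

	ctx := cli.NewContext(cli.NewApp(), set, nil)
	fmt.Println(ctx.String("name"))  // "demo"
	fmt.Println(ctx.Args().First()) // "positional"
}
```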
func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { return &Context{App: app, flagSet: set, parentContext: parentCtx} } -// Looks up the value of a local int flag, returns 0 if no int flag exists +// Int looks up the value of a local int flag, returns 0 if no int flag exists func (c *Context) Int(name string) int { return lookupInt(name, c.flagSet) } -// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists +// Duration looks up the value of a local time.Duration flag, returns 0 if no +// time.Duration flag exists func (c *Context) Duration(name string) time.Duration { return lookupDuration(name, c.flagSet) } -// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists +// Float64 looks up the value of a local float64 flag, returns 0 if no float64 +// flag exists func (c *Context) Float64(name string) float64 { return lookupFloat64(name, c.flagSet) } -// Looks up the value of a local bool flag, returns false if no bool flag exists +// Bool looks up the value of a local bool flag, returns false if no bool flag exists func (c *Context) Bool(name string) bool { return lookupBool(name, c.flagSet) } -// Looks up the value of a local boolT flag, returns false if no bool flag exists +// BoolT looks up the value of a local boolT flag, returns false if no bool flag exists func (c *Context) BoolT(name string) bool { return lookupBoolT(name, c.flagSet) } -// Looks up the value of a local string flag, returns "" if no string flag exists +// String looks up the value of a local string flag, returns "" if no string flag exists func (c *Context) String(name string) string { return lookupString(name, c.flagSet) } -// Looks up the value of a local string slice flag, returns nil if no string slice flag exists +// StringSlice looks up the value of a local string slice flag, returns nil if no +// string slice flag exists func (c *Context) StringSlice(name string) []string { return lookupStringSlice(name, c.flagSet) } -// Looks up the value of a local int slice flag, returns nil if no int slice flag exists +// IntSlice looks up the value of a local int slice flag, returns nil if no int +// slice flag exists func (c *Context) IntSlice(name string) []int { return lookupIntSlice(name, c.flagSet) } -// Looks up the value of a local generic flag, returns nil if no generic flag exists +// Generic looks up the value of a local generic flag, returns nil if no generic +// flag exists func (c *Context) Generic(name string) interface{} { return lookupGeneric(name, c.flagSet) } -// Looks up the value of a global int flag, returns 0 if no int flag exists +// GlobalInt looks up the value of a global int flag, returns 0 if no int flag exists func (c *Context) GlobalInt(name string) int { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupInt(name, fs) @@ -79,8 +84,8 @@ func (c *Context) GlobalInt(name string) int { return 0 } -// Looks up the value of a global float64 flag, returns float64(0) if no float64 -// flag exists +// GlobalFloat64 looks up the value of a global float64 flag, returns float64(0) +// if no float64 flag exists func (c *Context) GlobalFloat64(name string) float64 { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupFloat64(name, fs) @@ -88,7 +93,8 @@ func (c *Context) GlobalFloat64(name string) float64 { return float64(0) } -// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists +// GlobalDuration looks up the value of a global time.Duration flag, returns 
0 +// if no time.Duration flag exists func (c *Context) GlobalDuration(name string) time.Duration { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupDuration(name, fs) @@ -96,7 +102,8 @@ func (c *Context) GlobalDuration(name string) time.Duration { return 0 } -// Looks up the value of a global bool flag, returns false if no bool flag exists +// GlobalBool looks up the value of a global bool flag, returns false if no bool +// flag exists func (c *Context) GlobalBool(name string) bool { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupBool(name, fs) @@ -104,7 +111,17 @@ func (c *Context) GlobalBool(name string) bool { return false } -// Looks up the value of a global string flag, returns "" if no string flag exists +// GlobalBoolT looks up the value of a global bool flag, returns true if no bool +// flag exists +func (c *Context) GlobalBoolT(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBoolT(name, fs) + } + return false +} + +// GlobalString looks up the value of a global string flag, returns "" if no +// string flag exists func (c *Context) GlobalString(name string) string { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupString(name, fs) @@ -112,7 +129,8 @@ func (c *Context) GlobalString(name string) string { return "" } -// Looks up the value of a global string slice flag, returns nil if no string slice flag exists +// GlobalStringSlice looks up the value of a global string slice flag, returns +// nil if no string slice flag exists func (c *Context) GlobalStringSlice(name string) []string { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupStringSlice(name, fs) @@ -120,7 +138,8 @@ func (c *Context) GlobalStringSlice(name string) []string { return nil } -// Looks up the value of a global int slice flag, returns nil if no int slice flag exists +// GlobalIntSlice looks up the value of a global int slice flag, returns nil if +// no int slice flag exists func (c *Context) GlobalIntSlice(name string) []int { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupIntSlice(name, fs) @@ -128,7 +147,8 @@ func (c *Context) GlobalIntSlice(name string) []int { return nil } -// Looks up the value of a global generic flag, returns nil if no generic flag exists +// GlobalGeneric looks up the value of a global generic flag, returns nil if no +// generic flag exists func (c *Context) GlobalGeneric(name string) interface{} { if fs := lookupGlobalFlagSet(name, c); fs != nil { return lookupGeneric(name, fs) @@ -136,7 +156,7 @@ func (c *Context) GlobalGeneric(name string) interface{} { return nil } -// Returns the number of flags set +// NumFlags returns the number of flags set func (c *Context) NumFlags() int { return c.flagSet.NFlag() } @@ -151,7 +171,7 @@ func (c *Context) GlobalSet(name, value string) error { return globalContext(c).flagSet.Set(name, value) } -// Determines if the flag was actually set +// IsSet determines if the flag was actually set func (c *Context) IsSet(name string) bool { if c.setFlags == nil { c.setFlags = make(map[string]bool) @@ -162,7 +182,7 @@ func (c *Context) IsSet(name string) bool { return c.setFlags[name] == true } -// Determines if the global flag was actually set +// GlobalIsSet determines if the global flag was actually set func (c *Context) GlobalIsSet(name string) bool { if c.globalSetFlags == nil { c.globalSetFlags = make(map[string]bool) @@ -179,7 +199,7 @@ func (c *Context) GlobalIsSet(name string) bool { return c.globalSetFlags[name] } -// Returns a slice 
of flag names used in this context. +// FlagNames returns a slice of flag names used in this context. func (c *Context) FlagNames() (names []string) { for _, flag := range c.Command.Flags { name := strings.Split(flag.GetName(), ",")[0] @@ -191,7 +211,7 @@ func (c *Context) FlagNames() (names []string) { return } -// Returns a slice of global flag names used by the app. +// GlobalFlagNames returns a slice of global flag names used by the app. func (c *Context) GlobalFlagNames() (names []string) { for _, flag := range c.App.Flags { name := strings.Split(flag.GetName(), ",")[0] @@ -203,25 +223,26 @@ func (c *Context) GlobalFlagNames() (names []string) { return } -// Returns the parent context, if any +// Parent returns the parent context, if any func (c *Context) Parent() *Context { return c.parentContext } +// Args contains the app's console arguments type Args []string -// Returns the command line arguments associated with the context. +// Args returns the command line arguments associated with the context. func (c *Context) Args() Args { args := Args(c.flagSet.Args()) return args } -// Returns the number of the command line arguments. +// NArg returns the number of the command line arguments. func (c *Context) NArg() int { return len(c.Args()) } -// Returns the nth argument, or else a blank string +// Get returns the nth argument, or else a blank string func (a Args) Get(n int) string { if len(a) > n { return a[n] @@ -229,12 +250,12 @@ func (a Args) Get(n int) string { return "" } -// Returns the first argument, or else a blank string +// First returns the first argument, or else a blank string func (a Args) First() string { return a.Get(0) } -// Return the rest of the arguments (not the first one) +// Tail returns the rest of the arguments (not the first one) // or else an empty string slice func (a Args) Tail() []string { if len(a) >= 2 { @@ -243,12 +264,12 @@ func (a Args) Tail() []string { return []string{} } -// Checks if there are any arguments present +// Present checks if there are any arguments present func (a Args) Present() bool { return len(a) != 0 } -// Swaps arguments at the given indexes +// Swap swaps arguments at the given indexes func (a Args) Swap(from, to int) error { if from >= len(a) || to >= len(a) { return errors.New("index out of range") diff --git a/vendor/github.com/codegangsta/cli/errors.go b/vendor/github.com/codegangsta/cli/errors.go index 5f1e83be..ea551be1 100644 --- a/vendor/github.com/codegangsta/cli/errors.go +++ b/vendor/github.com/codegangsta/cli/errors.go @@ -2,20 +2,29 @@ package cli import ( "fmt" + "io" "os" "strings" ) +// OsExiter is the function used when the app exits. If not set, it defaults to os.Exit. var OsExiter = os.Exit +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. type MultiError struct { Errors []error } +// NewMultiError creates a new MultiError. Pass in one or more errors. func NewMultiError(err ...error) MultiError { return MultiError{Errors: err} } +// Error implements the error interface.
func (m MultiError) Error() string { errs := make([]string, len(m.Errors)) for i, err := range m.Errors { @@ -69,7 +78,7 @@ func HandleExitCoder(err error) { if exitErr, ok := err.(ExitCoder); ok { if err.Error() != "" { - fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(ErrWriter, err) } OsExiter(exitErr.ExitCode()) return diff --git a/vendor/github.com/codegangsta/cli/flag.go b/vendor/github.com/codegangsta/cli/flag.go index 3b6a2e19..1e8112e7 100644 --- a/vendor/github.com/codegangsta/cli/flag.go +++ b/vendor/github.com/codegangsta/cli/flag.go @@ -13,19 +13,19 @@ import ( const defaultPlaceholder = "value" -// This flag enables bash-completion for all commands and subcommands +// BashCompletionFlag enables bash-completion for all commands and subcommands var BashCompletionFlag = BoolFlag{ Name: "generate-bash-completion", Hidden: true, } -// This flag prints the version for the application +// VersionFlag prints the version for the application var VersionFlag = BoolFlag{ Name: "version, v", Usage: "print the version", } -// This flag prints the help for all commands and subcommands +// HelpFlag prints the help for all commands and subcommands // Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand // unless HideHelp is set to true) var HelpFlag = BoolFlag{ @@ -33,6 +33,8 @@ var HelpFlag = BoolFlag{ Usage: "show help", } +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. var FlagStringer FlagStringFunc = stringifyFlag // Flag is a common interface related to parsing flags in cli. @@ -103,6 +105,7 @@ func (f GenericFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of a flag. func (f GenericFlag) GetName() string { return f.Name } @@ -126,7 +129,7 @@ func (f *StringSlice) Value() []string { return *f } -// StringSlice is a string flag that can be specified multiple times on the +// StringSliceFlag is a string flag that can be specified multiple times on the // command-line type StringSliceFlag struct { Name string @@ -166,11 +169,12 @@ func (f StringSliceFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of a flag. func (f StringSliceFlag) GetName() string { return f.Name } -// StringSlice is an opaque type for []int to satisfy flag.Value +// IntSlice is an opaque type for []int to satisfy flag.Value type IntSlice []int // Set parses the value into an integer and appends it to the list of values @@ -178,9 +182,8 @@ func (f *IntSlice) Set(value string) error { tmp, err := strconv.Atoi(value) if err != nil { return err - } else { - *f = append(*f, tmp) } + *f = append(*f, tmp) return nil } @@ -220,7 +223,7 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) { s = strings.TrimSpace(s) err := newVal.Set(s) if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) + fmt.Fprintf(ErrWriter, err.Error()) } } f.Value = newVal @@ -237,6 +240,7 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f IntSliceFlag) GetName() string { return f.Name } @@ -280,6 +284,7 @@ func (f BoolFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f BoolFlag) GetName() string { return f.Name } @@ -324,6 +329,7 @@ func (f BoolTFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f BoolTFlag) GetName() string { return f.Name } @@ -364,6 +370,7 @@ func (f StringFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. 
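A quick illustration of the slice flag types whose Apply logic appears above (flag names and the env var are illustrative):

``` go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// Repeatable on the command line (-t a -t b) and seedable
		// from a comma-separated environment variable.
		cli.StringSliceFlag{Name: "tag, t", EnvVar: "TAGS"},
		cli.IntSliceFlag{Name: "port, p"},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println(c.StringSlice("tag"), c.IntSlice("port"))
		return nil
	}
	app.Run(os.Args)
}
```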
func (f StringFlag) GetName() string { return f.Name } @@ -408,6 +415,7 @@ func (f IntFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f IntFlag) GetName() string { return f.Name } @@ -452,6 +460,7 @@ func (f DurationFlag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f DurationFlag) GetName() string { return f.Name } @@ -495,6 +504,7 @@ func (f Float64Flag) Apply(set *flag.FlagSet) { }) } +// GetName returns the name of the flag. func (f Float64Flag) GetName() string { return f.Name } diff --git a/vendor/github.com/codegangsta/cli/funcs.go b/vendor/github.com/codegangsta/cli/funcs.go index 6b2a8463..cba5e6cb 100644 --- a/vendor/github.com/codegangsta/cli/funcs.go +++ b/vendor/github.com/codegangsta/cli/funcs.go @@ -1,23 +1,23 @@ package cli -// An action to execute when the bash-completion flag is set +// BashCompleteFunc is an action to execute when the bash-completion flag is set type BashCompleteFunc func(*Context) -// An action to execute before any subcommands are run, but after the context is ready -// If a non-nil error is returned, no subcommands are run +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run type BeforeFunc func(*Context) error -// An action to execute after any subcommands are run, but after the subcommand has finished -// It is run even if Action() panics +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics type AfterFunc func(*Context) error -// The action to execute when no subcommands are specified +// ActionFunc is the action to execute when no subcommands are specified type ActionFunc func(*Context) error -// Execute this function if the proper command cannot be found +// CommandNotFoundFunc is executed if the proper command cannot be found type CommandNotFoundFunc func(*Context, string) -// Execute this function if an usage error occurs. This is useful for displaying +// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying // customized usage error messages. This function is able to replace the // original error messages. If this function is not set, the "Incorrect usage" // is displayed and the execution is interrupted. diff --git a/vendor/github.com/codegangsta/cli/help.go b/vendor/github.com/codegangsta/cli/help.go index 45e8603b..a9e7327e 100644 --- a/vendor/github.com/codegangsta/cli/help.go +++ b/vendor/github.com/codegangsta/cli/help.go @@ -3,12 +3,13 @@ package cli import ( "fmt" "io" + "os" "strings" "text/tabwriter" "text/template" ) -// The text template for the Default help topic. +// AppHelpTemplate is the text template for the Default help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var AppHelpTemplate = `NAME: @@ -21,21 +22,21 @@ VERSION: {{.Version}} {{end}}{{end}}{{if len .Authors}} AUTHOR(S): - {{range .Authors}}{{ . 
}}{{end}} - {{end}}{{if .Commands}} -COMMANDS:{{range .Categories}}{{if .Name}} - {{.Name}}{{ ":" }}{{end}}{{range .Commands}} - {{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}{{end}} + {{range .Authors}}{{.}}{{end}} + {{end}}{{if .VisibleCommands}} +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} {{end}}{{end}}{{if .VisibleFlags}} GLOBAL OPTIONS: {{range .VisibleFlags}}{{.}} - {{end}}{{end}}{{if .Copyright }} + {{end}}{{end}}{{if .Copyright}} COPYRIGHT: {{.Copyright}} {{end}} ` -// The text template for the command help topic. +// CommandHelpTemplate is the text template for the command help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var CommandHelpTemplate = `NAME: @@ -52,10 +53,10 @@ DESCRIPTION: OPTIONS: {{range .VisibleFlags}}{{.}} - {{end}}{{ end }} + {{end}}{{end}} ` -// The text template for the subcommand help topic. +// SubcommandHelpTemplate is the text template for the subcommand help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var SubcommandHelpTemplate = `NAME: @@ -64,9 +65,9 @@ var SubcommandHelpTemplate = `NAME: USAGE: {{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} -COMMANDS:{{range .Categories}}{{if .Name}} - {{.Name}}{{ ":" }}{{end}}{{range .Commands}} - {{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}{{end}} +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} {{end}}{{if .VisibleFlags}} OPTIONS: {{range .VisibleFlags}}{{.}} @@ -81,10 +82,10 @@ var helpCommand = Command{ Action: func(c *Context) error { args := c.Args() if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowAppHelp(c) + return ShowCommandHelp(c, args.First()) } + + ShowAppHelp(c) return nil }, } @@ -97,63 +98,70 @@ var helpSubcommand = Command{ Action: func(c *Context) error { args := c.Args() if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowSubcommandHelp(c) + return ShowCommandHelp(c, args.First()) } - return nil + + return ShowSubcommandHelp(c) }, } // Prints help for the App or Command type helpPrinter func(w io.Writer, templ string, data interface{}) +// HelpPrinter is a function that writes the help output. If not set a default +// is used. The function signature is: +// func(w io.Writer, templ string, data interface{}) var HelpPrinter helpPrinter = printHelp -// Prints version for the App +// VersionPrinter prints the version for the App var VersionPrinter = printVersion +// ShowAppHelp is an action that displays the help. 
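The help templates above are package variables and can be overridden; a minimal sketch that leans on the same `VisibleCategories`/`VisibleCommands` accessors and the `join` helper the stock templates use (assuming `join` is registered by the package's help printer):

``` go
package main

import (
	"os"

	"github.com/urfave/cli"
)

func main() {
	// A trimmed-down app help template, rendered via text/template.
	cli.AppHelpTemplate = `{{.Name}} - {{.Usage}}
{{range .VisibleCategories}}{{range .VisibleCommands}}   {{join .Names ", "}}{{"\t"}}{{.Usage}}
{{end}}{{end}}`

	app := cli.NewApp()
	app.Run(os.Args)
}
```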
func ShowAppHelp(c *Context) { HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) } -// Prints the list of subcommands as the default app completion method +// DefaultAppComplete prints the list of subcommands as the default app completion method func DefaultAppComplete(c *Context) { for _, command := range c.App.Commands { + if command.Hidden { + continue + } for _, name := range command.Names() { fmt.Fprintln(c.App.Writer, name) } } } -// Prints help for the given command -func ShowCommandHelp(ctx *Context, command string) { +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { // show the subcommand help for a command with subcommands if command == "" { HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) - return + return nil } for _, c := range ctx.App.Commands { if c.HasName(command) { HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) - return + return nil } } - if ctx.App.CommandNotFound != nil { - ctx.App.CommandNotFound(ctx, command) - } else { - fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command) + if ctx.App.CommandNotFound == nil { + return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) } + + ctx.App.CommandNotFound(ctx, command) + return nil } -// Prints help for the given subcommand -func ShowSubcommandHelp(c *Context) { - ShowCommandHelp(c, c.Command.Name) +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + return ShowCommandHelp(c, c.Command.Name) } -// Prints the version number of the App +// ShowVersion prints the version number of the App func ShowVersion(c *Context) { VersionPrinter(c) } @@ -162,7 +170,7 @@ func printVersion(c *Context) { fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) } -// Prints the lists of commands within a given context +// ShowCompletions prints the lists of commands within a given context func ShowCompletions(c *Context) { a := c.App if a != nil && a.BashComplete != nil { @@ -170,7 +178,7 @@ func ShowCompletions(c *Context) { } } -// Prints the custom completions for a given command +// ShowCommandCompletions prints the custom completions for a given command func ShowCommandCompletions(ctx *Context, command string) { c := ctx.App.Command(command) if c != nil && c.BashComplete != nil { @@ -188,7 +196,10 @@ func printHelp(out io.Writer, templ string, data interface{}) { err := t.Execute(w, data) if err != nil { // If the writer is closed, t.Execute will fail, and there's nothing - // we can do to recover. We could send this to os.Stderr if we need. + // we can do to recover. 
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } return } w.Flush() diff --git a/vendor/github.com/codegangsta/cli/runtests b/vendor/github.com/codegangsta/cli/runtests new file mode 100755 index 00000000..72c1f0dd --- /dev/null +++ b/vendor/github.com/codegangsta/cli/runtests @@ -0,0 +1,99 @@ +#!/usr/bin/env python +from __future__ import print_function + +import argparse +import os +import sys +import tempfile + +from subprocess import check_call, check_output + + +PACKAGE_NAME = os.environ.get( + 'CLI_PACKAGE_NAME', 'github.com/urfave/cli' +) + + +def main(sysargs=sys.argv[:]): + targets = { + 'vet': _vet, + 'test': _test, + 'gfmxr': _gfmxr + } + + parser = argparse.ArgumentParser() + parser.add_argument( + 'target', nargs='?', choices=tuple(targets.keys()), default='test' + ) + args = parser.parse_args(sysargs[1:]) + + targets[args.target]() + return 0 + + +def _test(): + if check_output('go version'.split()).split()[2] < 'go1.2': + _run('go test -v .'.split()) + return + + coverprofiles = [] + for subpackage in ['', 'altsrc']: + coverprofile = 'cli.coverprofile' + if subpackage != '': + coverprofile = '{}.coverprofile'.format(subpackage) + + coverprofiles.append(coverprofile) + + _run('go test -v'.split() + [ + '-coverprofile={}'.format(coverprofile), + ('{}/{}'.format(PACKAGE_NAME, subpackage)).rstrip('/') + ]) + + combined_name = _combine_coverprofiles(coverprofiles) + _run('go tool cover -func={}'.format(combined_name).split()) + os.remove(combined_name) + + +def _gfmxr(): + _run(['gfmxr', '-c', str(_gfmxr_count()), '-s', 'README.md']) + + +def _vet(): + _run('go vet ./...'.split()) + + +def _run(command): + print('runtests: {}'.format(' '.join(command)), file=sys.stderr) + check_call(command) + + +def _gfmxr_count(): + with open('README.md') as infile: + lines = infile.read().splitlines() + return len(filter(_is_go_runnable, lines)) + + +def _is_go_runnable(line): + return line.startswith('package main') + + +def _combine_coverprofiles(coverprofiles): + combined = tempfile.NamedTemporaryFile( + suffix='.coverprofile', delete=False + ) + combined.write('mode: set\n') + + for coverprofile in coverprofiles: + with open(coverprofile, 'r') as infile: + for line in infile.readlines(): + if not line.startswith('mode: '): + combined.write(line) + + combined.flush() + name = combined.name + combined.close() + return name + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go b/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go deleted file mode 100644 index 94a47fa3..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/initialize/config.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package initialize - -import ( - "errors" - "fmt" - "log" - "path" - - "github.com/coreos/coreos-cloudinit/config" - "github.com/coreos/coreos-cloudinit/network" - "github.com/coreos/coreos-cloudinit/system" -) - -// CloudConfigFile represents a CoreOS specific configuration option that can generate -// an associated system.File to be written to disk -type CloudConfigFile interface { - // File should either return (*system.File, error), or (nil, nil) if nothing - // needs to be done for this configuration option. - File() (*system.File, error) -} - -// CloudConfigUnit represents a CoreOS specific configuration option that can generate -// associated system.Units to be created/enabled appropriately -type CloudConfigUnit interface { - Units() []system.Unit -} - -// Apply renders a CloudConfig to an Environment. This can involve things like -// configuring the hostname, adding new users, writing various configuration -// files to disk, and manipulating systemd services. -func Apply(cfg config.CloudConfig, ifaces []network.InterfaceGenerator, env *Environment) error { - if cfg.Hostname != "" { - if err := system.SetHostname(cfg.Hostname); err != nil { - return err - } - log.Printf("Set hostname to %s", cfg.Hostname) - } - - for _, user := range cfg.Users { - if user.Name == "" { - log.Printf("User object has no 'name' field, skipping") - continue - } - - if system.UserExists(&user) { - log.Printf("User '%s' exists, ignoring creation-time fields", user.Name) - if user.PasswordHash != "" { - log.Printf("Setting '%s' user's password", user.Name) - if err := system.SetUserPassword(user.Name, user.PasswordHash); err != nil { - log.Printf("Failed setting '%s' user's password: %v", user.Name, err) - return err - } - } - } else { - log.Printf("Creating user '%s'", user.Name) - if err := system.CreateUser(&user); err != nil { - log.Printf("Failed creating user '%s': %v", user.Name, err) - return err - } - } - - if len(user.SSHAuthorizedKeys) > 0 { - log.Printf("Authorizing %d SSH keys for user '%s'", len(user.SSHAuthorizedKeys), user.Name) - if err := system.AuthorizeSSHKeys(user.Name, env.SSHKeyName(), user.SSHAuthorizedKeys); err != nil { - return err - } - } - if user.SSHImportGithubUser != "" { - log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", user.SSHImportGithubUser, user.Name) - if err := SSHImportGithubUser(user.Name, user.SSHImportGithubUser); err != nil { - return err - } - } - for _, u := range user.SSHImportGithubUsers { - log.Printf("Authorizing github user %s SSH keys for CoreOS user '%s'", u, user.Name) - if err := SSHImportGithubUser(user.Name, u); err != nil { - return err - } - } - if user.SSHImportURL != "" { - log.Printf("Authorizing SSH keys for CoreOS user '%s' from '%s'", user.Name, user.SSHImportURL) - if err := SSHImportKeysFromURL(user.Name, user.SSHImportURL); err != nil { - return err - } - } - } - - if len(cfg.SSHAuthorizedKeys) > 0 { - err := system.AuthorizeSSHKeys("core", env.SSHKeyName(), cfg.SSHAuthorizedKeys) - if err == nil { - log.Printf("Authorized SSH keys for core user") - } else { - return err - } - } - - var writeFiles []system.File - for _, file := range cfg.WriteFiles { - writeFiles = append(writeFiles, system.File{File: file}) - } - - for _, ccf := range []CloudConfigFile{ - system.OEM{OEM: cfg.CoreOS.OEM}, - system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig}, - system.EtcHosts{EtcHosts: cfg.ManageEtcHosts}, - system.Flannel{Flannel: cfg.CoreOS.Flannel}, - } { - f, err := ccf.File() - if err != nil { - 
return err - } - if f != nil { - writeFiles = append(writeFiles, *f) - } - } - - var units []system.Unit - for _, u := range cfg.CoreOS.Units { - units = append(units, system.Unit{Unit: u}) - } - - for _, ccu := range []CloudConfigUnit{ - system.Etcd{Etcd: cfg.CoreOS.Etcd}, - system.Etcd2{Etcd2: cfg.CoreOS.Etcd2}, - system.Fleet{Fleet: cfg.CoreOS.Fleet}, - system.Locksmith{Locksmith: cfg.CoreOS.Locksmith}, - system.Update{Update: cfg.CoreOS.Update, ReadConfig: system.DefaultReadConfig}, - } { - units = append(units, ccu.Units()...) - } - - wroteEnvironment := false - for _, file := range writeFiles { - fullPath, err := system.WriteFile(&file, env.Root()) - if err != nil { - return err - } - if path.Clean(file.Path) == "/etc/environment" { - wroteEnvironment = true - } - log.Printf("Wrote file %s to filesystem", fullPath) - } - - if !wroteEnvironment { - ef := env.DefaultEnvironmentFile() - if ef != nil { - err := system.WriteEnvFile(ef, env.Root()) - if err != nil { - return err - } - log.Printf("Updated /etc/environment") - } - } - - if len(ifaces) > 0 { - units = append(units, createNetworkingUnits(ifaces)...) - if err := system.RestartNetwork(ifaces); err != nil { - return err - } - } - - um := system.NewUnitManager(env.Root()) - return processUnits(units, env.Root(), um) -} - -func createNetworkingUnits(interfaces []network.InterfaceGenerator) (units []system.Unit) { - appendNewUnit := func(units []system.Unit, name, content string) []system.Unit { - if content == "" { - return units - } - return append(units, system.Unit{Unit: config.Unit{ - Name: name, - Runtime: true, - Content: content, - }}) - } - for _, i := range interfaces { - units = appendNewUnit(units, fmt.Sprintf("%s.netdev", i.Filename()), i.Netdev()) - units = appendNewUnit(units, fmt.Sprintf("%s.link", i.Filename()), i.Link()) - units = appendNewUnit(units, fmt.Sprintf("%s.network", i.Filename()), i.Network()) - } - return units -} - -// processUnits takes a set of Units and applies them to the given root using -// the given UnitManager. This can involve things like writing unit files to -// disk, masking/unmasking units, or invoking systemd -// commands against units. It returns any error encountered. 
-func processUnits(units []system.Unit, root string, um system.UnitManager) error { - type action struct { - unit system.Unit - command string - } - actions := make([]action, 0, len(units)) - reload := false - restartNetworkd := false - for _, unit := range units { - if unit.Name == "" { - log.Printf("Skipping unit without name") - continue - } - - if unit.Content != "" { - log.Printf("Writing unit %q to filesystem", unit.Name) - if err := um.PlaceUnit(unit); err != nil { - return err - } - log.Printf("Wrote unit %q", unit.Name) - reload = true - } - - for _, dropin := range unit.DropIns { - if dropin.Name != "" && dropin.Content != "" { - log.Printf("Writing drop-in unit %q to filesystem", dropin.Name) - if err := um.PlaceUnitDropIn(unit, dropin); err != nil { - return err - } - log.Printf("Wrote drop-in unit %q", dropin.Name) - reload = true - } - } - - if unit.Mask { - log.Printf("Masking unit file %q", unit.Name) - if err := um.MaskUnit(unit); err != nil { - return err - } - } else if unit.Runtime { - log.Printf("Ensuring runtime unit file %q is unmasked", unit.Name) - if err := um.UnmaskUnit(unit); err != nil { - return err - } - } - - if unit.Enable { - if unit.Group() != "network" { - log.Printf("Enabling unit file %q", unit.Name) - if err := um.EnableUnitFile(unit); err != nil { - return err - } - log.Printf("Enabled unit %q", unit.Name) - } else { - log.Printf("Skipping enable for network-like unit %q", unit.Name) - } - } - - if unit.Group() == "network" { - restartNetworkd = true - } else if unit.Command != "" { - actions = append(actions, action{unit, unit.Command}) - } - } - - if reload { - if err := um.DaemonReload(); err != nil { - return errors.New(fmt.Sprintf("failed systemd daemon-reload: %s", err)) - } - } - - if restartNetworkd { - log.Printf("Restarting systemd-networkd") - networkd := system.Unit{Unit: config.Unit{Name: "systemd-networkd.service"}} - res, err := um.RunUnitCommand(networkd, "restart") - if err != nil { - return err - } - log.Printf("Restarted systemd-networkd (%s)", res) - } - - for _, action := range actions { - log.Printf("Calling unit command %q on %q", action.command, action.unit.Name) - res, err := um.RunUnitCommand(action.unit, action.command) - if err != nil { - return err - } - log.Printf("Result of %q on %q: %s", action.command, action.unit.Name, res) - } - - return nil -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/debian.go b/vendor/github.com/coreos/coreos-cloudinit/network/debian.go deleted file mode 100644 index 91646cbd..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/debian.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
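For orientation, the deleted processUnits above reduces to a two-phase queue: write unit files and record pending commands first, daemon-reload once if anything changed on disk, and only then run the queued commands. A minimal, self-contained sketch of that pattern follows; the unit and unitManager types here are simplified stand-ins, not the real coreos-cloudinit API.

```go
package main

import "fmt"

type unit struct {
	Name    string
	Content string
	Command string
}

type unitManager interface {
	PlaceUnit(u unit) error
	DaemonReload() error
	RunUnitCommand(u unit, cmd string) (string, error)
}

// fakeManager just logs what a real manager would do.
type fakeManager struct{}

func (fakeManager) PlaceUnit(u unit) error { fmt.Println("place", u.Name); return nil }
func (fakeManager) DaemonReload() error    { fmt.Println("daemon-reload"); return nil }
func (fakeManager) RunUnitCommand(u unit, cmd string) (string, error) {
	fmt.Println(cmd, u.Name)
	return "done", nil
}

// process mirrors the deleted control flow in simplified form: place files,
// reload once if anything was written, then run the queued commands.
func process(units []unit, um unitManager) error {
	type action struct {
		unit    unit
		command string
	}
	var actions []action
	reload := false
	for _, u := range units {
		if u.Content != "" {
			if err := um.PlaceUnit(u); err != nil {
				return err
			}
			reload = true // a unit file changed on disk
		}
		if u.Command != "" {
			actions = append(actions, action{u, u.Command})
		}
	}
	if reload {
		if err := um.DaemonReload(); err != nil {
			return err
		}
	}
	for _, a := range actions {
		if _, err := um.RunUnitCommand(a.unit, a.command); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = process([]unit{{Name: "etcd2.service", Content: "[Service]\n", Command: "start"}}, fakeManager{})
}
```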
- -package network - -import ( - "log" - "strings" -) - -func ProcessDebianNetconf(config []byte) ([]InterfaceGenerator, error) { - log.Println("Processing Debian network config") - lines := formatConfig(string(config)) - stanzas, err := parseStanzas(lines) - if err != nil { - return nil, err - } - - interfaces := make([]*stanzaInterface, 0, len(stanzas)) - for _, stanza := range stanzas { - switch s := stanza.(type) { - case *stanzaInterface: - interfaces = append(interfaces, s) - } - } - log.Printf("Parsed %d network interfaces\n", len(interfaces)) - - log.Println("Processed Debian network config") - return buildInterfaces(interfaces), nil -} - -func formatConfig(config string) []string { - lines := []string{} - config = strings.Replace(config, "\\\n", "", -1) - for config != "" { - split := strings.SplitN(config, "\n", 2) - line := strings.TrimSpace(split[0]) - - if len(split) == 2 { - config = split[1] - } else { - config = "" - } - - if strings.HasPrefix(line, "#") || line == "" { - continue - } - - lines = append(lines, line) - } - return lines -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go b/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go deleted file mode 100644 index 78759a10..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/digitalocean.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
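The deleted formatConfig above is the heart of the Debian parser: it joins backslash-continued lines, trims whitespace, and drops comments and blank lines before stanza parsing. A behaviorally equivalent, runnable sketch, using strings.Split in place of the original SplitN loop:

```go
package main

import (
	"fmt"
	"strings"
)

// normalize joins "\"-continued lines, trims whitespace, and drops blank
// lines and "#" comments, like the deleted formatConfig.
func normalize(config string) []string {
	var lines []string
	config = strings.Replace(config, "\\\n", "", -1)
	for _, raw := range strings.Split(config, "\n") {
		line := strings.TrimSpace(raw)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		lines = append(lines, line)
	}
	return lines
}

func main() {
	sample := "# loopback\nauto lo\niface lo inet\\\n loopback\n"
	fmt.Printf("%q\n", normalize(sample))
	// Output: ["auto lo" "iface lo inet loopback"]
}
```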
- -package network - -import ( - "fmt" - "log" - "net" - - "github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean" -) - -func ProcessDigitalOceanNetconf(config digitalocean.Metadata) ([]InterfaceGenerator, error) { - log.Println("Processing DigitalOcean network config") - - log.Println("Parsing nameservers") - nameservers, err := parseNameservers(config.DNS) - if err != nil { - return nil, err - } - log.Printf("Parsed %d nameservers\n", len(nameservers)) - - log.Println("Parsing interfaces") - generators, err := parseInterfaces(config.Interfaces, nameservers) - if err != nil { - return nil, err - } - log.Printf("Parsed %d network interfaces\n", len(generators)) - - log.Println("Processed DigitalOcean network config") - return generators, nil -} - -func parseNameservers(config digitalocean.DNS) ([]net.IP, error) { - nameservers := make([]net.IP, 0, len(config.Nameservers)) - for _, ns := range config.Nameservers { - if ip := net.ParseIP(ns); ip == nil { - return nil, fmt.Errorf("could not parse %q as nameserver IP address", ns) - } else { - nameservers = append(nameservers, ip) - } - } - return nameservers, nil -} - -func parseInterfaces(config digitalocean.Interfaces, nameservers []net.IP) ([]InterfaceGenerator, error) { - generators := make([]InterfaceGenerator, 0, len(config.Public)+len(config.Private)) - for _, iface := range config.Public { - if generator, err := parseInterface(iface, nameservers, true); err == nil { - generators = append(generators, &physicalInterface{*generator}) - } else { - return nil, err - } - } - for _, iface := range config.Private { - if generator, err := parseInterface(iface, []net.IP{}, false); err == nil { - generators = append(generators, &physicalInterface{*generator}) - } else { - return nil, err - } - } - return generators, nil -} - -func parseInterface(iface digitalocean.Interface, nameservers []net.IP, useRoute bool) (*logicalInterface, error) { - routes := make([]route, 0) - addresses := make([]net.IPNet, 0) - if iface.IPv4 != nil { - var ip, mask, gateway net.IP - if ip = net.ParseIP(iface.IPv4.IPAddress); ip == nil { - return nil, fmt.Errorf("could not parse %q as IPv4 address", iface.IPv4.IPAddress) - } - if mask = net.ParseIP(iface.IPv4.Netmask); mask == nil { - return nil, fmt.Errorf("could not parse %q as IPv4 mask", iface.IPv4.Netmask) - } - addresses = append(addresses, net.IPNet{ - IP: ip, - Mask: net.IPMask(mask), - }) - - if useRoute { - if gateway = net.ParseIP(iface.IPv4.Gateway); gateway == nil { - return nil, fmt.Errorf("could not parse %q as IPv4 gateway", iface.IPv4.Gateway) - } - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv4zero, - Mask: net.IPMask(net.IPv4zero), - }, - gateway: gateway, - }) - } - } - if iface.IPv6 != nil { - var ip, gateway net.IP - if ip = net.ParseIP(iface.IPv6.IPAddress); ip == nil { - return nil, fmt.Errorf("could not parse %q as IPv6 address", iface.IPv6.IPAddress) - } - addresses = append(addresses, net.IPNet{ - IP: ip, - Mask: net.CIDRMask(iface.IPv6.Cidr, net.IPv6len*8), - }) - - if useRoute { - if gateway = net.ParseIP(iface.IPv6.Gateway); gateway == nil { - return nil, fmt.Errorf("could not parse %q as IPv6 gateway", iface.IPv6.Gateway) - } - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv6zero, - Mask: net.IPMask(net.IPv6zero), - }, - gateway: gateway, - }) - } - } - if iface.AnchorIPv4 != nil { - var ip, mask net.IP - if ip = net.ParseIP(iface.AnchorIPv4.IPAddress); ip == nil { - return nil, fmt.Errorf("could not parse %q as anchor IPv4 
address", iface.AnchorIPv4.IPAddress) - } - if mask = net.ParseIP(iface.AnchorIPv4.Netmask); mask == nil { - return nil, fmt.Errorf("could not parse %q as anchor IPv4 mask", iface.AnchorIPv4.Netmask) - } - addresses = append(addresses, net.IPNet{ - IP: ip, - Mask: net.IPMask(mask), - }) - - if useRoute { - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv4zero, - Mask: net.IPMask(net.IPv4zero), - }, - }) - } - } - - hwaddr, err := net.ParseMAC(iface.MAC) - if err != nil { - return nil, err - } - - if nameservers == nil { - nameservers = []net.IP{} - } - - return &logicalInterface{ - hwaddr: hwaddr, - config: configMethodStatic{ - addresses: addresses, - nameservers: nameservers, - routes: routes, - }, - }, nil -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/interface.go b/vendor/github.com/coreos/coreos-cloudinit/network/interface.go deleted file mode 100644 index 73a83cbc..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/interface.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package network - -import ( - "fmt" - "net" - "sort" - "strconv" - "strings" -) - -type InterfaceGenerator interface { - Name() string - Filename() string - Netdev() string - Link() string - Network() string - Type() string - ModprobeParams() string -} - -type networkInterface interface { - InterfaceGenerator - Children() []networkInterface - setConfigDepth(int) -} - -type logicalInterface struct { - name string - hwaddr net.HardwareAddr - config configMethod - children []networkInterface - configDepth int -} - -func (i *logicalInterface) Name() string { - return i.name -} - -func (i *logicalInterface) Network() string { - config := fmt.Sprintln("[Match]") - if i.name != "" { - config += fmt.Sprintf("Name=%s\n", i.name) - } - if i.hwaddr != nil { - config += fmt.Sprintf("MACAddress=%s\n", i.hwaddr) - } - config += "\n[Network]\n" - - for _, child := range i.children { - switch iface := child.(type) { - case *vlanInterface: - config += fmt.Sprintf("VLAN=%s\n", iface.name) - case *bondInterface: - config += fmt.Sprintf("Bond=%s\n", iface.name) - } - } - - switch conf := i.config.(type) { - case configMethodStatic: - for _, nameserver := range conf.nameservers { - config += fmt.Sprintf("DNS=%s\n", nameserver) - } - for _, addr := range conf.addresses { - config += fmt.Sprintf("\n[Address]\nAddress=%s\n", addr.String()) - } - for _, route := range conf.routes { - config += fmt.Sprintf("\n[Route]\nDestination=%s\nGateway=%s\n", route.destination.String(), route.gateway) - } - case configMethodDHCP: - config += "DHCP=true\n" - } - - return config -} - -func (i *logicalInterface) Link() string { - return "" -} - -func (i *logicalInterface) Netdev() string { - return "" -} - -func (i *logicalInterface) Filename() string { - name := i.name - if name == "" { - name = i.hwaddr.String() - } - return fmt.Sprintf("%02x-%s", i.configDepth, name) -} - -func (i *logicalInterface) Children() 
[]networkInterface { - return i.children -} - -func (i *logicalInterface) ModprobeParams() string { - return "" -} - -func (i *logicalInterface) setConfigDepth(depth int) { - i.configDepth = depth -} - -type physicalInterface struct { - logicalInterface -} - -func (p *physicalInterface) Type() string { - return "physical" -} - -type bondInterface struct { - logicalInterface - slaves []string - options map[string]string -} - -func (b *bondInterface) Netdev() string { - config := fmt.Sprintf("[NetDev]\nKind=bond\nName=%s\n", b.name) - if b.hwaddr != nil { - config += fmt.Sprintf("MACAddress=%s\n", b.hwaddr.String()) - } - - config += fmt.Sprintf("\n[Bond]\n") - for _, name := range sortedKeys(b.options) { - config += fmt.Sprintf("%s=%s\n", name, b.options[name]) - } - - return config -} - -func (b *bondInterface) Type() string { - return "bond" -} - -func (b *bondInterface) ModprobeParams() string { - params := "" - for _, name := range sortedKeys(b.options) { - params += fmt.Sprintf("%s=%s ", name, b.options[name]) - } - params = strings.TrimSuffix(params, " ") - return params -} - -type vlanInterface struct { - logicalInterface - id int - rawDevice string -} - -func (v *vlanInterface) Netdev() string { - config := fmt.Sprintf("[NetDev]\nKind=vlan\nName=%s\n", v.name) - switch c := v.config.(type) { - case configMethodStatic: - if c.hwaddress != nil { - config += fmt.Sprintf("MACAddress=%s\n", c.hwaddress) - } - case configMethodDHCP: - if c.hwaddress != nil { - config += fmt.Sprintf("MACAddress=%s\n", c.hwaddress) - } - } - config += fmt.Sprintf("\n[VLAN]\nId=%d\n", v.id) - return config -} - -func (v *vlanInterface) Type() string { - return "vlan" -} - -func buildInterfaces(stanzas []*stanzaInterface) []InterfaceGenerator { - interfaceMap := createInterfaces(stanzas) - linkAncestors(interfaceMap) - markConfigDepths(interfaceMap) - - interfaces := make([]InterfaceGenerator, 0, len(interfaceMap)) - for _, name := range sortedInterfaces(interfaceMap) { - interfaces = append(interfaces, interfaceMap[name]) - } - - return interfaces -} - -func createInterfaces(stanzas []*stanzaInterface) map[string]networkInterface { - interfaceMap := make(map[string]networkInterface) - for _, iface := range stanzas { - switch iface.kind { - case interfaceBond: - bondOptions := make(map[string]string) - for _, k := range []string{"mode", "miimon", "lacp-rate"} { - if v, ok := iface.options["bond-"+k]; ok && len(v) > 0 { - bondOptions[k] = v[0] - } - } - interfaceMap[iface.name] = &bondInterface{ - logicalInterface{ - name: iface.name, - config: iface.configMethod, - children: []networkInterface{}, - }, - iface.options["bond-slaves"], - bondOptions, - } - for _, slave := range iface.options["bond-slaves"] { - if _, ok := interfaceMap[slave]; !ok { - interfaceMap[slave] = &physicalInterface{ - logicalInterface{ - name: slave, - config: configMethodManual{}, - children: []networkInterface{}, - }, - } - } - } - - case interfacePhysical: - if _, ok := iface.configMethod.(configMethodLoopback); ok { - continue - } - interfaceMap[iface.name] = &physicalInterface{ - logicalInterface{ - name: iface.name, - config: iface.configMethod, - children: []networkInterface{}, - }, - } - - case interfaceVLAN: - var rawDevice string - id, _ := strconv.Atoi(iface.options["id"][0]) - if device := iface.options["raw_device"]; len(device) == 1 { - rawDevice = device[0] - if _, ok := interfaceMap[rawDevice]; !ok { - interfaceMap[rawDevice] = &physicalInterface{ - logicalInterface{ - name: rawDevice, - config: configMethodManual{}, - 
children: []networkInterface{}, - }, - } - } - } - interfaceMap[iface.name] = &vlanInterface{ - logicalInterface{ - name: iface.name, - config: iface.configMethod, - children: []networkInterface{}, - }, - id, - rawDevice, - } - } - } - return interfaceMap -} - -func linkAncestors(interfaceMap map[string]networkInterface) { - for _, name := range sortedInterfaces(interfaceMap) { - iface := interfaceMap[name] - switch i := iface.(type) { - case *vlanInterface: - if parent, ok := interfaceMap[i.rawDevice]; ok { - switch p := parent.(type) { - case *physicalInterface: - p.children = append(p.children, iface) - case *bondInterface: - p.children = append(p.children, iface) - } - } - case *bondInterface: - for _, slave := range i.slaves { - if parent, ok := interfaceMap[slave]; ok { - switch p := parent.(type) { - case *physicalInterface: - p.children = append(p.children, iface) - case *bondInterface: - p.children = append(p.children, iface) - } - } - } - } - } -} - -func markConfigDepths(interfaceMap map[string]networkInterface) { - rootInterfaceMap := make(map[string]networkInterface) - for k, v := range interfaceMap { - rootInterfaceMap[k] = v - } - - for _, iface := range interfaceMap { - for _, child := range iface.Children() { - delete(rootInterfaceMap, child.Name()) - } - } - for _, iface := range rootInterfaceMap { - setDepth(iface) - } -} - -func setDepth(iface networkInterface) int { - maxDepth := 0 - for _, child := range iface.Children() { - if depth := setDepth(child); depth > maxDepth { - maxDepth = depth - } - } - iface.setConfigDepth(maxDepth) - return (maxDepth + 1) -} - -func sortedKeys(m map[string]string) (keys []string) { - for key := range m { - keys = append(keys, key) - } - sort.Strings(keys) - return -} - -func sortedInterfaces(m map[string]networkInterface) (keys []string) { - for key := range m { - keys = append(keys, key) - } - sort.Strings(keys) - return -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/packet.go b/vendor/github.com/coreos/coreos-cloudinit/network/packet.go deleted file mode 100644 index a2834ff9..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/packet.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
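The interface.go deletion above centers on logicalInterface.Network(), which renders systemd-networkd INI text from a parsed config. A compressed sketch of the same rendering for a single static interface; the interface name and addresses are invented for illustration:

```go
package main

import (
	"fmt"
	"net"
)

// render emits [Match]/[Network]/[Address]/[Route] sections in the same
// shape the deleted logicalInterface.Network() produced for a static config.
func render(name string, addrs []net.IPNet, dns []net.IP, gateway net.IP) string {
	config := "[Match]\n"
	config += fmt.Sprintf("Name=%s\n", name)
	config += "\n[Network]\n"
	for _, ns := range dns {
		config += fmt.Sprintf("DNS=%s\n", ns)
	}
	for _, addr := range addrs {
		config += fmt.Sprintf("\n[Address]\nAddress=%s\n", addr.String())
	}
	if gateway != nil {
		config += fmt.Sprintf("\n[Route]\nDestination=0.0.0.0/0\nGateway=%s\n", gateway)
	}
	return config
}

func main() {
	addr := net.IPNet{IP: net.ParseIP("192.0.2.10"), Mask: net.CIDRMask(24, 32)}
	fmt.Print(render("eth0", []net.IPNet{addr}, []net.IP{net.ParseIP("8.8.8.8")}, net.ParseIP("192.0.2.1")))
}
```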
- -package network - -import ( - "net" - - "github.com/coreos/coreos-cloudinit/datasource/metadata/packet" -) - -func ProcessPacketNetconf(netdata packet.NetworkData) ([]InterfaceGenerator, error) { - var nameservers []net.IP - if netdata.DNS != nil { - nameservers = netdata.DNS - } else { - nameservers = append(nameservers, net.ParseIP("8.8.8.8"), net.ParseIP("8.8.4.4")) - } - - generators, err := parseNetwork(netdata, nameservers) - if err != nil { - return nil, err - } - - return generators, nil -} - -func parseNetwork(netdata packet.NetworkData, nameservers []net.IP) ([]InterfaceGenerator, error) { - var interfaces []InterfaceGenerator - var addresses []net.IPNet - var routes []route - for _, netblock := range netdata.Netblocks { - addresses = append(addresses, net.IPNet{ - IP: netblock.Address, - Mask: net.IPMask(netblock.Netmask), - }) - if netblock.Public == false { - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv4(10, 0, 0, 0), - Mask: net.IPv4Mask(255, 0, 0, 0), - }, - gateway: netblock.Gateway, - }) - } else { - if netblock.AddressFamily == 4 { - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv4zero, - Mask: net.IPMask(net.IPv4zero), - }, - gateway: netblock.Gateway, - }) - } else { - routes = append(routes, route{ - destination: net.IPNet{ - IP: net.IPv6zero, - Mask: net.IPMask(net.IPv6zero), - }, - gateway: netblock.Gateway, - }) - } - } - } - - bond := bondInterface{ - logicalInterface: logicalInterface{ - name: "bond0", - config: configMethodStatic{ - addresses: addresses, - nameservers: nameservers, - routes: routes, - }, - }, - options: map[string]string{ - "Mode": "802.3ad", - "LACPTransmitRate": "fast", - "MIIMonitorSec": ".2", - "UpDelaySec": ".2", - "DownDelaySec": ".2", - }, - } - - bond.hwaddr, _ = net.ParseMAC(netdata.Interfaces[0].Mac) - - for index, iface := range netdata.Interfaces { - bond.slaves = append(bond.slaves, iface.Name) - - interfaces = append(interfaces, &physicalInterface{ - logicalInterface: logicalInterface{ - name: iface.Name, - config: configMethodStatic{ - nameservers: nameservers, - }, - children: []networkInterface{&bond}, - configDepth: index, - }, - }) - } - - interfaces = append(interfaces, &bond) - - return interfaces, nil -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go b/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go deleted file mode 100644 index 88bca813..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/stanza.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
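One detail worth noting in the deleted packet.go above: default routes are represented as zero-valued net.IPNet destinations, with 10.0.0.0/8 used for private ("Public == false") netblocks. The construction is easy to verify in isolation:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Destinations as built in the deleted parseNetwork: an all-zeros
	// IPNet stringifies to the default route for each address family.
	v4Default := net.IPNet{IP: net.IPv4zero, Mask: net.IPMask(net.IPv4zero)}
	v6Default := net.IPNet{IP: net.IPv6zero, Mask: net.IPMask(net.IPv6zero)}
	private := net.IPNet{IP: net.IPv4(10, 0, 0, 0), Mask: net.IPv4Mask(255, 0, 0, 0)}
	fmt.Println(v4Default.String(), v6Default.String(), private.String())
	// Output: 0.0.0.0/0 ::/0 10.0.0.0/8
}
```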
- -package network - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -type stanza interface{} - -type stanzaAuto struct { - interfaces []string -} - -type stanzaInterface struct { - name string - kind interfaceKind - auto bool - configMethod configMethod - options map[string][]string -} - -type interfaceKind int - -const ( - interfaceBond = interfaceKind(iota) - interfacePhysical - interfaceVLAN -) - -type route struct { - destination net.IPNet - gateway net.IP -} - -type configMethod interface{} - -type configMethodStatic struct { - addresses []net.IPNet - nameservers []net.IP - routes []route - hwaddress net.HardwareAddr -} - -type configMethodLoopback struct{} - -type configMethodManual struct{} - -type configMethodDHCP struct { - hwaddress net.HardwareAddr -} - -func parseStanzas(lines []string) (stanzas []stanza, err error) { - rawStanzas, err := splitStanzas(lines) - if err != nil { - return nil, err - } - - stanzas = make([]stanza, 0, len(rawStanzas)) - for _, rawStanza := range rawStanzas { - if stanza, err := parseStanza(rawStanza); err == nil { - stanzas = append(stanzas, stanza) - } else { - return nil, err - } - } - - autos := make([]string, 0) - interfaceMap := make(map[string]*stanzaInterface) - for _, stanza := range stanzas { - switch c := stanza.(type) { - case *stanzaAuto: - autos = append(autos, c.interfaces...) - case *stanzaInterface: - interfaceMap[c.name] = c - } - } - - // Apply the auto attribute - for _, auto := range autos { - if iface, ok := interfaceMap[auto]; ok { - iface.auto = true - } - } - - return stanzas, nil -} - -func splitStanzas(lines []string) ([][]string, error) { - var curStanza []string - stanzas := make([][]string, 0) - for _, line := range lines { - if isStanzaStart(line) { - if curStanza != nil { - stanzas = append(stanzas, curStanza) - } - curStanza = []string{line} - } else if curStanza != nil { - curStanza = append(curStanza, line) - } else { - return nil, fmt.Errorf("missing stanza start %q", line) - } - } - - if curStanza != nil { - stanzas = append(stanzas, curStanza) - } - - return stanzas, nil -} - -func isStanzaStart(line string) bool { - switch strings.Split(line, " ")[0] { - case "auto": - fallthrough - case "iface": - fallthrough - case "mapping": - return true - } - - if strings.HasPrefix(line, "allow-") { - return true - } - - return false -} - -func parseStanza(rawStanza []string) (stanza, error) { - if len(rawStanza) == 0 { - panic("empty stanza") - } - tokens := strings.Fields(rawStanza[0]) - if len(tokens) < 2 { - return nil, fmt.Errorf("malformed stanza start %q", rawStanza[0]) - } - - kind := tokens[0] - attributes := tokens[1:] - - switch kind { - case "auto": - return parseAutoStanza(attributes, rawStanza[1:]) - case "iface": - return parseInterfaceStanza(attributes, rawStanza[1:]) - default: - return nil, fmt.Errorf("unknown stanza %q", kind) - } -} - -func parseAutoStanza(attributes []string, options []string) (*stanzaAuto, error) { - return &stanzaAuto{interfaces: attributes}, nil -} - -func parseInterfaceStanza(attributes []string, options []string) (*stanzaInterface, error) { - if len(attributes) != 3 { - return nil, fmt.Errorf("incorrect number of attributes") - } - - iface := attributes[0] - confMethod := attributes[2] - - optionMap := make(map[string][]string, 0) - for _, option := range options { - if strings.HasPrefix(option, "post-up") { - tokens := strings.SplitAfterN(option, " ", 2) - if len(tokens) != 2 { - continue - } - if v, ok := optionMap["post-up"]; ok { - optionMap["post-up"] = append(v, 
tokens[1]) - } else { - optionMap["post-up"] = []string{tokens[1]} - } - } else if strings.HasPrefix(option, "pre-down") { - tokens := strings.SplitAfterN(option, " ", 2) - if len(tokens) != 2 { - continue - } - if v, ok := optionMap["pre-down"]; ok { - optionMap["pre-down"] = append(v, tokens[1]) - } else { - optionMap["pre-down"] = []string{tokens[1]} - } - } else { - tokens := strings.Fields(option) - optionMap[tokens[0]] = tokens[1:] - } - } - - var conf configMethod - switch confMethod { - case "static": - config := configMethodStatic{ - addresses: make([]net.IPNet, 1), - routes: make([]route, 0), - nameservers: make([]net.IP, 0), - } - if addresses, ok := optionMap["address"]; ok { - if len(addresses) == 1 { - config.addresses[0].IP = net.ParseIP(addresses[0]) - } - } - if netmasks, ok := optionMap["netmask"]; ok { - if len(netmasks) == 1 { - config.addresses[0].Mask = net.IPMask(net.ParseIP(netmasks[0]).To4()) - } - } - if config.addresses[0].IP == nil || config.addresses[0].Mask == nil { - return nil, fmt.Errorf("malformed static network config for %q", iface) - } - if gateways, ok := optionMap["gateway"]; ok { - if len(gateways) == 1 { - config.routes = append(config.routes, route{ - destination: net.IPNet{ - IP: net.IPv4(0, 0, 0, 0), - Mask: net.IPv4Mask(0, 0, 0, 0), - }, - gateway: net.ParseIP(gateways[0]), - }) - } - } - if hwaddress, err := parseHwaddress(optionMap, iface); err == nil { - config.hwaddress = hwaddress - } else { - return nil, err - } - for _, nameserver := range optionMap["dns-nameservers"] { - config.nameservers = append(config.nameservers, net.ParseIP(nameserver)) - } - for _, postup := range optionMap["post-up"] { - if strings.HasPrefix(postup, "route add") { - route := route{} - fields := strings.Fields(postup) - for i, field := range fields[:len(fields)-1] { - switch field { - case "-net": - if _, dst, err := net.ParseCIDR(fields[i+1]); err == nil { - route.destination = *dst - } else { - route.destination.IP = net.ParseIP(fields[i+1]) - } - case "netmask": - route.destination.Mask = net.IPMask(net.ParseIP(fields[i+1]).To4()) - case "gw": - route.gateway = net.ParseIP(fields[i+1]) - } - } - if route.destination.IP != nil && route.destination.Mask != nil && route.gateway != nil { - config.routes = append(config.routes, route) - } - } - } - conf = config - case "loopback": - conf = configMethodLoopback{} - case "manual": - conf = configMethodManual{} - case "dhcp": - config := configMethodDHCP{} - if hwaddress, err := parseHwaddress(optionMap, iface); err == nil { - config.hwaddress = hwaddress - } else { - return nil, err - } - conf = config - default: - return nil, fmt.Errorf("invalid config method %q", confMethod) - } - - if _, ok := optionMap["vlan_raw_device"]; ok { - return parseVLANStanza(iface, conf, attributes, optionMap) - } - - if strings.Contains(iface, ".") { - return parseVLANStanza(iface, conf, attributes, optionMap) - } - - if _, ok := optionMap["bond-slaves"]; ok { - return parseBondStanza(iface, conf, attributes, optionMap) - } - - return parsePhysicalStanza(iface, conf, attributes, optionMap) -} - -func parseHwaddress(options map[string][]string, iface string) (net.HardwareAddr, error) { - if hwaddress, ok := options["hwaddress"]; ok && len(hwaddress) == 2 { - switch hwaddress[0] { - case "ether": - if address, err := net.ParseMAC(hwaddress[1]); err == nil { - return address, nil - } - return nil, fmt.Errorf("malformed hwaddress option for %q", iface) - } - } - return nil, nil -} - -func parseBondStanza(iface string, conf configMethod, 
attributes []string, options map[string][]string) (*stanzaInterface, error) { - return &stanzaInterface{name: iface, kind: interfaceBond, configMethod: conf, options: options}, nil -} - -func parsePhysicalStanza(iface string, conf configMethod, attributes []string, options map[string][]string) (*stanzaInterface, error) { - return &stanzaInterface{name: iface, kind: interfacePhysical, configMethod: conf, options: options}, nil -} - -func parseVLANStanza(iface string, conf configMethod, attributes []string, options map[string][]string) (*stanzaInterface, error) { - var id string - if strings.Contains(iface, ".") { - tokens := strings.Split(iface, ".") - id = tokens[len(tokens)-1] - } else if strings.HasPrefix(iface, "vlan") { - id = strings.TrimPrefix(iface, "vlan") - } else { - return nil, fmt.Errorf("malformed vlan name %q", iface) - } - - if _, err := strconv.Atoi(id); err != nil { - return nil, fmt.Errorf("malformed vlan name %q", iface) - } - options["id"] = []string{id} - options["raw_device"] = options["vlan_raw_device"] - - return &stanzaInterface{name: iface, kind: interfaceVLAN, configMethod: conf, options: options}, nil -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go b/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go deleted file mode 100644 index 230be42a..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/network/vmware.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package network - -import ( - "fmt" - "log" - "net" -) - -func ProcessVMwareNetconf(config map[string]string) ([]InterfaceGenerator, error) { - log.Println("Processing VMware network config") - - log.Println("Parsing nameservers") - var nameservers []net.IP - for i := 0; ; i++ { - if ipStr, ok := config[fmt.Sprintf("dns.server.%d", i)]; ok { - if ip := net.ParseIP(ipStr); ip != nil { - nameservers = append(nameservers, ip) - } else { - return nil, fmt.Errorf("invalid nameserver: %q", ipStr) - } - } else { - break - } - } - log.Printf("Parsed %d nameservers", len(nameservers)) - - var interfaces []InterfaceGenerator - for i := 0; ; i++ { - var addresses []net.IPNet - var routes []route - var err error - var dhcp bool - iface := &physicalInterface{} - - log.Printf("Processing interface %d", i) - - log.Println("Processing DHCP") - if dhcp, err = processDHCPConfig(config, fmt.Sprintf("interface.%d.", i)); err != nil { - return nil, err - } - - log.Println("Processing addresses") - if as, err := processAddressConfig(config, fmt.Sprintf("interface.%d.", i)); err == nil { - addresses = append(addresses, as...) - } else { - return nil, err - } - - log.Println("Processing routes") - if rs, err := processRouteConfig(config, fmt.Sprintf("interface.%d.", i)); err == nil { - routes = append(routes, rs...)
- } else { - return nil, err - } - - if mac, ok := config[fmt.Sprintf("interface.%d.mac", i)]; ok { - log.Printf("Parsing interface %d MAC address: %q", i, mac) - if hwaddr, err := net.ParseMAC(mac); err == nil { - iface.hwaddr = hwaddr - } else { - return nil, fmt.Errorf("error while parsing MAC address: %v", err) - } - } - - if name, ok := config[fmt.Sprintf("interface.%d.name", i)]; ok { - log.Printf("Parsing interface %d name: %q", i, name) - iface.name = name - } - - if len(addresses) > 0 || len(routes) > 0 { - iface.config = configMethodStatic{ - hwaddress: iface.hwaddr, - addresses: addresses, - nameservers: nameservers, - routes: routes, - } - } else if dhcp { - iface.config = configMethodDHCP{ - hwaddress: iface.hwaddr, - } - } else { - break - } - - interfaces = append(interfaces, iface) - } - - return interfaces, nil -} - -func processAddressConfig(config map[string]string, prefix string) (addresses []net.IPNet, err error) { - for a := 0; ; a++ { - prefix := fmt.Sprintf("%sip.%d.", prefix, a) - - addressStr, ok := config[prefix+"address"] - if !ok { - break - } - - ip, network, err := net.ParseCIDR(addressStr) - if err != nil { - return nil, fmt.Errorf("invalid address: %q", addressStr) - } - addresses = append(addresses, net.IPNet{ - IP: ip, - Mask: network.Mask, - }) - } - - return -} - -func processRouteConfig(config map[string]string, prefix string) (routes []route, err error) { - for r := 0; ; r++ { - prefix := fmt.Sprintf("%sroute.%d.", prefix, r) - - gatewayStr, gok := config[prefix+"gateway"] - destinationStr, dok := config[prefix+"destination"] - if gok && !dok { - return nil, fmt.Errorf("missing destination key") - } else if !gok && dok { - return nil, fmt.Errorf("missing gateway key") - } else if !gok && !dok { - break - } - - gateway := net.ParseIP(gatewayStr) - if gateway == nil { - return nil, fmt.Errorf("invalid gateway: %q", gatewayStr) - } - - _, destination, err := net.ParseCIDR(destinationStr) - if err != nil { - return nil, err - } - - routes = append(routes, route{ - destination: *destination, - gateway: gateway, - }) - } - - return -} - -func processDHCPConfig(config map[string]string, prefix string) (dhcp bool, err error) { - dhcpStr, ok := config[prefix+"dhcp"] - if !ok { - return false, nil - } - - switch dhcpStr { - case "yes": - return true, nil - case "no": - return false, nil - default: - return false, fmt.Errorf("invalid DHCP option: %q", dhcpStr) - } -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go b/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go deleted file mode 100644 index 025b09cb..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/networkd.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
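The deleted vmware.go reads a flat, guestinfo-style key/value namespace, probing numbered keys (interface.N.*, ip.N.*, route.N.*) until the first gap. A toy illustration of that key scheme; the key names follow the patterns in the code above, while the map contents are hypothetical sample data:

```go
package main

import (
	"fmt"
	"net"
)

// Hypothetical sample of the flat namespace ProcessVMwareNetconf walked.
var sample = map[string]string{
	"dns.server.0":             "8.8.8.8",
	"interface.0.name":         "eth0",
	"interface.0.ip.0.address": "192.0.2.10/24",
	"interface.1.name":         "eth1",
	"interface.1.dhcp":         "yes",
}

func main() {
	for i := 0; ; i++ {
		name, ok := sample[fmt.Sprintf("interface.%d.name", i)]
		if !ok {
			break // numbering stops at the first gap
		}
		if addr, ok := sample[fmt.Sprintf("interface.%d.ip.0.address", i)]; ok {
			ip, ipnet, err := net.ParseCIDR(addr)
			if err != nil {
				fmt.Println(name, "bad address:", err)
				continue
			}
			fmt.Printf("%s: static %s in %s\n", name, ip, ipnet)
			continue
		}
		if sample[fmt.Sprintf("interface.%d.dhcp", i)] == "yes" {
			fmt.Printf("%s: dhcp\n", name)
		}
	}
}
```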
- -package system - -import ( - "log" - "net" - "os/exec" - "strings" - - "github.com/coreos/coreos-cloudinit/config" - "github.com/coreos/coreos-cloudinit/network" - - "github.com/docker/libcontainer/netlink" -) - -func RestartNetwork(interfaces []network.InterfaceGenerator) (err error) { - defer func() { - if e := restartNetworkd(); e != nil { - err = e - } - }() - - if err = downNetworkInterfaces(interfaces); err != nil { - return - } - - if err = maybeProbe8012q(interfaces); err != nil { - return - } - return maybeProbeBonding(interfaces) -} - -func downNetworkInterfaces(interfaces []network.InterfaceGenerator) error { - sysInterfaceMap := make(map[string]*net.Interface) - if systemInterfaces, err := net.Interfaces(); err == nil { - for _, iface := range systemInterfaces { - iface := iface - sysInterfaceMap[iface.Name] = &iface - } - } else { - return err - } - - for _, iface := range interfaces { - if systemInterface, ok := sysInterfaceMap[iface.Name()]; ok { - log.Printf("Taking down interface %q\n", systemInterface.Name) - if err := netlink.NetworkLinkDown(systemInterface); err != nil { - log.Printf("Error while downing interface %q (%s). Continuing...\n", systemInterface.Name, err) - } - } - } - - return nil -} - -func maybeProbe8012q(interfaces []network.InterfaceGenerator) error { - for _, iface := range interfaces { - if iface.Type() == "vlan" { - log.Printf("Probing LKM %q (%q)\n", "8021q", "8021q") - return exec.Command("modprobe", "8021q").Run() - } - } - return nil -} - -func maybeProbeBonding(interfaces []network.InterfaceGenerator) error { - for _, iface := range interfaces { - if iface.Type() == "bond" { - args := append([]string{"bonding"}, strings.Split(iface.ModprobeParams(), " ")...) - log.Printf("Probing LKM %q (%q)\n", "bonding", args) - return exec.Command("modprobe", args...).Run() - } - } - return nil -} - -func restartNetworkd() error { - log.Printf("Restarting networkd.service\n") - networkd := Unit{config.Unit{Name: "systemd-networkd.service"}} - _, err := NewUnitManager("").RunUnitCommand(networkd, "restart") - return err -} diff --git a/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go b/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go deleted file mode 100644 index 190b9e77..00000000 --- a/vendor/github.com/coreos/coreos-cloudinit/system/systemd.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path" - "strings" - - "github.com/coreos/go-systemd/dbus" - "github.com/coreos/coreos-cloudinit/config" -) - -func NewUnitManager(root string) UnitManager { - return &systemd{root} -} - -type systemd struct { - root string -} - -// fakeMachineID is placed on non-usr CoreOS images and should -// never be used as a true MachineID -const fakeMachineID = "42000000000000000000000000000042" - -// PlaceUnit writes a unit file at its desired destination, creating parent -// directories as necessary. 
-func (s *systemd) PlaceUnit(u Unit) error { - file := File{config.File{ - Path: u.Destination(s.root), - Content: u.Content, - RawFilePermissions: "0644", - }} - - _, err := WriteFile(&file, "/") - return err -} - -// PlaceUnitDropIn writes a unit drop-in file at its desired destination, -// creating parent directories as necessary. -func (s *systemd) PlaceUnitDropIn(u Unit, d config.UnitDropIn) error { - file := File{config.File{ - Path: u.DropInDestination(s.root, d), - Content: d.Content, - RawFilePermissions: "0644", - }} - - _, err := WriteFile(&file, "/") - return err -} - -func (s *systemd) EnableUnitFile(u Unit) error { - conn, err := dbus.New() - if err != nil { - return err - } - - units := []string{u.Name} - _, _, err = conn.EnableUnitFiles(units, u.Runtime, true) - return err -} - -func (s *systemd) RunUnitCommand(u Unit, c string) (string, error) { - conn, err := dbus.New() - if err != nil { - return "", err - } - - var fn func(string, string) (string, error) - switch c { - case "start": - fn = conn.StartUnit - case "stop": - fn = conn.StopUnit - case "restart": - fn = conn.RestartUnit - case "reload": - fn = conn.ReloadUnit - case "try-restart": - fn = conn.TryRestartUnit - case "reload-or-restart": - fn = conn.ReloadOrRestartUnit - case "reload-or-try-restart": - fn = conn.ReloadOrTryRestartUnit - default: - return "", fmt.Errorf("Unsupported systemd command %q", c) - } - - return fn(u.Name, "replace") -} - -func (s *systemd) DaemonReload() error { - conn, err := dbus.New() - if err != nil { - return err - } - - return conn.Reload() -} - -// MaskUnit masks the given Unit by symlinking its unit file to -// /dev/null, analogous to `systemctl mask`. -// N.B.: Unlike `systemctl mask`, this function will *remove any existing unit -// file at the location*, to ensure that the mask will succeed. -func (s *systemd) MaskUnit(u Unit) error { - masked := u.Destination(s.root) - if _, err := os.Stat(masked); os.IsNotExist(err) { - if err := os.MkdirAll(path.Dir(masked), os.FileMode(0755)); err != nil { - return err - } - } else if err := os.Remove(masked); err != nil { - return err - } - return os.Symlink("/dev/null", masked) -} - -// UnmaskUnit is analogous to systemd's unit_file_unmask. If the file -// associated with the given Unit is empty or appears to be a symlink to -// /dev/null, it is removed. 
-func (s *systemd) UnmaskUnit(u Unit) error { - masked := u.Destination(s.root) - ne, err := nullOrEmpty(masked) - if os.IsNotExist(err) { - return nil - } else if err != nil { - return err - } - if !ne { - log.Printf("%s is not null or empty, refusing to unmask", masked) - return nil - } - return os.Remove(masked) -} - -// nullOrEmpty checks whether a given path appears to be an empty regular file -// or a symlink to /dev/null -func nullOrEmpty(path string) (bool, error) { - fi, err := os.Stat(path) - if err != nil { - return false, err - } - m := fi.Mode() - if m.IsRegular() && fi.Size() <= 0 { - return true, nil - } - if m&os.ModeCharDevice > 0 { - return true, nil - } - return false, nil -} - -func ExecuteScript(scriptPath string) (string, error) { - props := []dbus.Property{ - dbus.PropDescription("Unit generated and executed by coreos-cloudinit on behalf of user"), - dbus.PropExecStart([]string{"/bin/bash", scriptPath}, false), - } - - base := path.Base(scriptPath) - name := fmt.Sprintf("coreos-cloudinit-%s.service", base) - - log.Printf("Creating transient systemd unit '%s'", name) - - conn, err := dbus.New() - if err != nil { - return "", err - } - - _, err = conn.StartTransientUnit(name, "replace", props...) - return name, err -} - -func SetHostname(hostname string) error { - return exec.Command("hostnamectl", "set-hostname", hostname).Run() -} - -func Hostname() (string, error) { - return os.Hostname() -} - -func MachineID(root string) string { - contents, _ := ioutil.ReadFile(path.Join(root, "etc", "machine-id")) - id := strings.TrimSpace(string(contents)) - - if id == fakeMachineID { - id = "" - } - - return id -} diff --git a/vendor/github.com/coreos/go-systemd/.travis.yml b/vendor/github.com/coreos/go-systemd/.travis.yml index 8c9f56e4..3c37292e 100644 --- a/vendor/github.com/coreos/go-systemd/.travis.yml +++ b/vendor/github.com/coreos/go-systemd/.travis.yml @@ -1,8 +1,8 @@ language: go -go: 1.2 +go: 1.4 install: - - echo "Skip install" + - go get github.com/godbus/dbus script: - ./test diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md index 0ee09fec..cb87a112 100644 --- a/vendor/github.com/coreos/go-systemd/README.md +++ b/vendor/github.com/coreos/go-systemd/README.md @@ -1,29 +1,31 @@ # go-systemd -Go bindings to systemd. The project has three packages: +[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd) +[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd) -- activation - for writing and using socket activation from Go -- journal - for writing to systemd's logging service, journal -- dbus - for starting/stopping/inspecting running services and units +Go bindings to systemd. 
The project has several packages: -Go docs for the entire project are here: - -http://godoc.org/github.com/coreos/go-systemd +- `activation` - for writing and using socket activation from Go +- `dbus` - for starting/stopping/inspecting running services and units +- `journal` - for writing to systemd's logging service, journald +- `sdjournal` - for reading from journald by wrapping its C API +- `machine1` - for registering machines/containers with systemd +- `unit` - for (de)serialization and comparison of unit files ## Socket Activation -An example HTTP server using socket activation can be quickly setup by -following this README on a Linux machine running systemd: +An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd: https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver ## Journal -Using this package you can submit journal entries directly to systemd's journal taking advantage of features like indexed key/value pairs for each log entry. +Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry. +The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available. ## D-Bus -The D-Bus API lets you start, stop and introspect systemd units. The API docs are here: +The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here: http://godoc.org/github.com/coreos/go-systemd/dbus @@ -42,3 +44,11 @@ Create `/etc/dbus-1/system-local.conf` that looks like this: ``` + +## machined + +The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/). + +## Units + +The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html). diff --git a/vendor/github.com/coreos/go-systemd/activation/files.go b/vendor/github.com/coreos/go-systemd/activation/files.go new file mode 100644 index 00000000..c8e85fcd --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/files.go @@ -0,0 +1,52 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package activation implements primitives for systemd socket activation. 
+package activation + +import ( + "os" + "strconv" + "syscall" +) + +// based on: https://gist.github.com/alberts/4640792 +const ( + listenFdsStart = 3 +) + +func Files(unsetEnv bool) []*os.File { + if unsetEnv { + defer os.Unsetenv("LISTEN_PID") + defer os.Unsetenv("LISTEN_FDS") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + files := make([]*os.File, 0, nfds) + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) + } + + return files +} diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go new file mode 100644 index 00000000..df27c29e --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go @@ -0,0 +1,62 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "crypto/tls" + "net" +) + +// Listeners returns a slice containing a net.Listener for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener} +func Listeners(unsetEnv bool) ([]net.Listener, error) { + files := Files(unsetEnv) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + if pc, err := net.FileListener(f); err == nil { + listeners[i] = pc + } + } + return listeners, nil +} + +// TLSListeners returns a slice containing a net.listener for each matching TCP socket type +// passed to this process. +// It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig. +func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) { + listeners, err := Listeners(unsetEnv) + + if listeners == nil || err != nil { + return nil, err + } + + if tlsConfig != nil && err == nil { + tlsConfig.NextProtos = []string{"http/1.1"} + + for i, l := range listeners { + // Activate TLS only for TCP sockets + if l.Addr().Network() == "tcp" { + listeners[i] = tls.NewListener(l, tlsConfig) + } + } + } + + return listeners, err +} diff --git a/vendor/github.com/coreos/go-systemd/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/activation/packetconns.go new file mode 100644 index 00000000..48b2ca02 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/activation/packetconns.go @@ -0,0 +1,37 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "net" +) + +// PacketConns returns a slice containing a net.PacketConn for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} +func PacketConns(unsetEnv bool) ([]net.PacketConn, error) { + files := Files(unsetEnv) + conns := make([]net.PacketConn, len(files)) + + for i, f := range files { + if pc, err := net.FilePacketConn(f); err == nil { + conns[i] = pc + } + } + return conns, nil +} diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go new file mode 100644 index 00000000..b92b1911 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go @@ -0,0 +1,31 @@ +// Code forked from Docker project +package daemon + +import ( + "errors" + "net" + "os" +) + +var SdNotifyNoSocket = errors.New("No socket") + +// SdNotify sends a message to the init daemon. It is common to ignore the error. +func SdNotify(state string) error { + socketAddr := &net.UnixAddr{ + Name: os.Getenv("NOTIFY_SOCKET"), + Net: "unixgram", + } + + if socketAddr.Name == "" { + return SdNotifyNoSocket + } + + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + if err != nil { + return err + } + defer conn.Close() + + _, err = conn.Write([]byte(state)) + return err +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go index 88927b7f..5dd748e6 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/dbus.go +++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -1,49 +1,77 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Integration with the systemd D-Bus API. 
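The new daemon/sdnotify.go above is small, but the calling convention is easy to get wrong. A minimal Type=notify service would use it roughly as follows; the HTTP server is just a placeholder workload:

```go
package main

import (
	"log"
	"net/http"

	"github.com/coreos/go-systemd/daemon"
)

func main() {
	// Signal readiness to systemd once startup work is done. The error is
	// commonly ignored; SdNotifyNoSocket only means NOTIFY_SOCKET was not
	// set, i.e. the process was not started as a Type=notify unit.
	if err := daemon.SdNotify("READY=1"); err != nil && err != daemon.SdNotifyNoSocket {
		log.Printf("sd_notify failed: %v", err)
	}
	log.Fatal(http.ListenAndServe(":8080", http.NotFoundHandler()))
}
```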
See http://www.freedesktop.org/wiki/Software/systemd/dbus/ package dbus import ( + "fmt" "os" "strconv" "strings" "sync" - "github.com/guelfey/go.dbus" + "github.com/godbus/dbus" ) -const signalBuffer = 100 +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) -// ObjectPath creates a dbus.ObjectPath using the rules that systemd uses for -// serializing special characters. -func ObjectPath(path string) dbus.ObjectPath { - path = strings.Replace(path, ".", "_2e", -1) - path = strings.Replace(path, "-", "_2d", -1) - path = strings.Replace(path, "@", "_40", -1) - - return dbus.ObjectPath(path) +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) } -// Conn is a connection to systemds dbus endpoint. +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. type Conn struct { - sysconn *dbus.Conn - sysobj *dbus.Object + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + jobListener struct { - jobs map[dbus.ObjectPath]chan string + jobs map[dbus.ObjectPath]chan<- string sync.Mutex } subscriber struct { @@ -53,26 +81,77 @@ type Conn struct { ignore map[dbus.ObjectPath]int64 cleanIgnore int64 } - dispatch map[string]func(dbus.Signal) } -// New() establishes a connection to the system bus and authenticates. +// New establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. func New() (*Conn, error) { - c := new(Conn) + return newConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} - if err := c.initConnection(); err != nil { +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnection() (*Conn, error) { + return newConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnection() (*Conn, error) { + return newConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. 
+ return dbusAuthConnection(func() (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := createBus() + if err != nil { return nil, err } - c.initJobs() + sigconn, err := createBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() return c, nil } -func (c *Conn) initConnection() error { - var err error - c.sysconn, err = dbus.SystemBusPrivate() +func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() if err != nil { - return err + return nil, err } // Only use EXTERNAL method, and hardcode the uid (not username) @@ -80,25 +159,29 @@ func (c *Conn) initConnection() error { // libc) methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} - err = c.sysconn.Auth(methods) + err = conn.Auth(methods) if err != nil { - c.sysconn.Close() - return err + conn.Close() + return nil, err } - err = c.sysconn.Hello() - if err != nil { - c.sysconn.Close() - return err - } - - c.sysobj = c.sysconn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) - - // Setup the listeners on jobs so that we can get completions - c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") - c.initSubscription() - c.initDispatch() - - return nil + return conn, nil +} + +func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) } diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go index 2897b11e..ab614c7c 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/methods.go +++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -1,29 +1,26 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

 package dbus

 import (
 	"errors"
-	"github.com/guelfey/go.dbus"
-)
+	"path"
+	"strconv"

-func (c *Conn) initJobs() {
-	c.jobListener.jobs = make(map[dbus.ObjectPath]chan string)
-}
+	"github.com/godbus/dbus"
+)

 func (c *Conn) jobComplete(signal *dbus.Signal) {
 	var id uint32
@@ -40,29 +37,29 @@ func (c *Conn) jobComplete(signal *dbus.Signal) {
 	c.jobListener.Unlock()
 }

-func (c *Conn) startJob(job string, args ...interface{}) (<-chan string, error) {
-	c.jobListener.Lock()
-	defer c.jobListener.Unlock()
-
-	ch := make(chan string, 1)
-	var path dbus.ObjectPath
-	err := c.sysobj.Call(job, 0, args...).Store(&path)
-	if err != nil {
-		return nil, err
+func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+	if ch != nil {
+		c.jobListener.Lock()
+		defer c.jobListener.Unlock()
 	}
-	c.jobListener.jobs[path] = ch
-	return ch, nil
+
+	var p dbus.ObjectPath
+	err := c.sysobj.Call(job, 0, args...).Store(&p)
+	if err != nil {
+		return 0, err
+	}
+
+	if ch != nil {
+		c.jobListener.jobs[p] = ch
+	}
+
+	// ignore error since 0 is fine if conversion fails
+	jobID, _ := strconv.Atoi(path.Base(string(p)))
+
+	return jobID, nil
 }

-func (c *Conn) runJob(job string, args ...interface{}) (string, error) {
-	respCh, err := c.startJob(job, args...)
-	if err != nil {
-		return "", err
-	}
-	return <-respCh, nil
-}
-
-// StartUnit enqeues a start job and depending jobs, if any (unless otherwise
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
 // specified by the mode string).
 //
 // Takes the unit to activate, plus a mode string. The mode needs to be one of
@@ -77,50 +74,58 @@ func (c *Conn) runJob(job string, args ...interface{}) (string, error) {
 // requirement dependencies. It is not recommended to make use of the latter
 // two options.
 //
-// Result string: one of done, canceled, timeout, failed, dependency, skipped.
+// If the provided channel is non-nil, a result string will be sent to it upon
+// job completion: one of done, canceled, timeout, failed, dependency, skipped.
 // done indicates successful execution of a job. canceled indicates that a job
 // has been canceled before it finished execution. timeout indicates that the
 // job timeout was reached. failed indicates that the job failed. dependency
 // indicates that a job this job has been depending on failed, and this job has
 // hence been removed too. skipped indicates that a job was skipped because it
 // didn't apply to the unit's current state.
-func (c *Conn) StartUnit(name string, mode string) (string, error) {
-	return c.runJob("org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+//
+// If no error occurs, the ID of the underlying systemd job will be returned.
+// It is possible for no error to be returned but for the returned job ID to be
+// 0; in that case the actual underlying ID is not 0, and the returned value
+// should not be considered authoritative.
+//
+// If an error does occur, it will be returned to the user alongside a job ID of 0.
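+//
+// A minimal caller sketch (illustrative only; the unit name, mode, and
+// channel buffering are assumptions, and conn is a previously created
+// *Conn):
+//
+//	ch := make(chan string, 1)
+//	if _, err := conn.StartUnit("foo.service", "replace", ch); err != nil {
+//		return err
+//	}
+//	result := <-ch // e.g. "done" on success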
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
 }

 // StopUnit is similar to StartUnit but stops the specified unit rather
 // than starting it.
-func (c *Conn) StopUnit(name string, mode string) (string, error) {
-	return c.runJob("org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
 }

 // ReloadUnit reloads a unit. Reloading is done only if the unit is already
 // running, and fails otherwise.
-func (c *Conn) ReloadUnit(name string, mode string) (string, error) {
-	return c.runJob("org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
 }

 // RestartUnit restarts a service. If a service is restarted that isn't
 // running it will be started.
-func (c *Conn) RestartUnit(name string, mode string) (string, error) {
-	return c.runJob("org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
 }

 // TryRestartUnit is like RestartUnit, except that a service that isn't running
 // is not affected by the restart.
-func (c *Conn) TryRestartUnit(name string, mode string) (string, error) {
-	return c.runJob("org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
 }

 // ReloadOrRestartUnit attempts a reload if the unit supports it and uses a
 // restart otherwise.
-func (c *Conn) ReloadOrRestartUnit(name string, mode string) (string, error) {
-	return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
 }

 // ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a
 // "Try"-flavored restart otherwise.
-func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error) {
-	return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
 }

 // StartTransientUnit() may be used to create and start a transient unit, which
@@ -128,8 +133,8 @@ func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error)
 // system is rebooted. name is the unit name including suffix, and must be
 // unique. mode is the same as in StartUnit(); properties contains properties
 // of the unit.
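+//
+// A minimal sketch of the new call shape (illustrative; the unit name and
+// Description property are assumptions, and Property is assumed to expose
+// Name and Value fields as in upstream go-systemd):
+//
+//	props := []Property{
+//		{Name: "Description", Value: dbus.MakeVariant("demo transient unit")},
+//	}
+//	_, err := conn.StartTransientUnit("demo.service", "replace", props, nil)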
-func (c *Conn) StartTransientUnit(name string, mode string, properties ...Property) (string, error) { - return c.runJob("org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) } // KillUnit takes the unit name and a UNIX signal number to send. All of the unit's @@ -138,12 +143,17 @@ func (c *Conn) KillUnit(name string, signal int32) { c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() } +// ResetFailedUnit resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnit(name string) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + // getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { var err error var props map[string]dbus.Variant - path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit) + path := unitPath(unit) if !path.IsValid() { return nil, errors.New("invalid unit name: " + unit) } @@ -171,7 +181,7 @@ func (c *Conn) getProperty(unit string, dbusInterface string, propertyName strin var err error var prop dbus.Variant - path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit) + path := unitPath(unit) if !path.IsValid() { return nil, errors.New("invalid unit name: " + unit) } @@ -204,11 +214,11 @@ func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]i // to modify. properties are the settings to set, encoded as an array of property // name and value pairs. func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.sysobj.Call("SetUnitProperties", 0, name, runtime, properties).Store() + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() } func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1." + unitType, propertyName) + return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) } // ListUnits returns an array with all currently loaded units. Note that @@ -253,6 +263,48 @@ type UnitStatus struct { JobPath dbus.ObjectPath // The job object path } +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. 
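+//
+// A minimal sketch of a call (illustrative; the unit path is an assumption,
+// and the Type/Filename/Destination field names follow the upstream
+// EnableUnitFileChange struct that LinkUnitFileChange aliases):
+//
+//	changes, err := conn.LinkUnitFiles([]string{"/tmp/foo.service"}, true, false)
+//	if err != nil {
+//		return err
+//	}
+//	for _, change := range changes {
+//		fmt.Println(change.Type, change.Filename, change.Destination)
+//	}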
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + // EnableUnitFiles() may be used to enable one or more units in the system (by // creating symlinks to them in /etc or /run). // @@ -317,7 +369,7 @@ type EnableUnitFileChange struct { // symlink. func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { result := make([][]interface{}, 0) - err := c.sysobj.Call("DisableUnitFiles", 0, files, runtime).Store(&result) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } @@ -352,3 +404,7 @@ type DisableUnitFileChange struct { func (c *Conn) Reload() error { return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() } + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go index 83dc1ab6..75200115 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/properties.go +++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go @@ -1,23 +1,21 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package dbus import ( - "github.com/guelfey/go.dbus" + "github.com/godbus/dbus" ) // From the systemd docs: diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go index 88378b29..f92e6fbe 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/set.go +++ b/vendor/github.com/coreos/go-systemd/dbus/set.go @@ -1,3 +1,17 @@ +// Copyright 2015 CoreOS, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package dbus type set struct { @@ -17,10 +31,17 @@ func (s *set) Contains(value string) (exists bool) { return } -func (s *set) Length() (int) { +func (s *set) Length() int { return len(s.data) } -func newSet() (*set) { - return &set{make(map[string] bool)} +func (s *set) Values() (values []string) { + for val, _ := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} } diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go index f538fdba..99645144 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription.go +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go @@ -1,18 +1,16 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package dbus @@ -20,7 +18,7 @@ import ( "errors" "time" - "github.com/guelfey/go.dbus" + "github.com/godbus/dbus" ) const ( @@ -33,14 +31,13 @@ const ( // systemd will automatically stop sending signals so there is no need to // explicitly call Unsubscribe(). func (c *Conn) Subscribe() error { - c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") - c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() if err != nil { - c.sysconn.Close() return err } @@ -49,45 +46,52 @@ func (c *Conn) Subscribe() error { // Unsubscribe this connection from systemd dbus events. 
func (c *Conn) Unsubscribe() error { - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() if err != nil { - c.sysconn.Close() return err } return nil } -func (c *Conn) initSubscription() { - c.subscriber.ignore = make(map[dbus.ObjectPath]int64) -} - -func (c *Conn) initDispatch() { +func (c *Conn) dispatch() { ch := make(chan *dbus.Signal, signalBuffer) - c.sysconn.Signal(ch) + c.sigconn.Signal(ch) go func() { for { - signal := <-ch + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath switch signal.Name { case "org.freedesktop.systemd1.Manager.JobRemoved": - c.jobComplete(signal) - unitName := signal.Body[2].(string) - var unitPath dbus.ObjectPath c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) - if unitPath != dbus.ObjectPath("") { - c.sendSubStateUpdate(unitPath) - } case "org.freedesktop.systemd1.Manager.UnitNew": - c.sendSubStateUpdate(signal.Body[1].(dbus.ObjectPath)) + unitPath = signal.Body[1].(dbus.ObjectPath) case "org.freedesktop.DBus.Properties.PropertiesChanged": if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { - // we only care about SubState updates, which are a Unit property - c.sendSubStateUpdate(signal.Path) + unitPath = signal.Path } } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) } }() } @@ -101,7 +105,7 @@ func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitSt // SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer // size of the channels, the comparison function for detecting changes and a filter // function for cutting down on the noise that your channel receives. -func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func (string) bool) (<-chan map[string]*UnitStatus, <-chan error) { +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { old := make(map[string]*UnitStatus) statusChan := make(chan map[string]*UnitStatus, buffer) errChan := make(chan error, buffer) @@ -174,9 +178,6 @@ func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { c.subscriber.Lock() defer c.subscriber.Unlock() - if c.subscriber.updateCh == nil { - return - } if c.shouldIgnore(path) { return diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go index 26257860..5b408d58 100644 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go @@ -1,3 +1,17 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package dbus import ( @@ -11,7 +25,6 @@ type SubscriptionSet struct { conn *Conn } - func (s *SubscriptionSet) filter(unit string) bool { return !s.Contains(unit) } @@ -21,12 +34,24 @@ func (s *SubscriptionSet) filter(unit string) bool { func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { // TODO: Make fully evented by using systemd 209 with properties changed values return s.conn.SubscribeUnitsCustom(time.Second, 0, - func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, + mismatchUnitStatus, func(unit string) bool { return s.filter(unit) }, ) } // NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() (*SubscriptionSet) { +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { return &SubscriptionSet{newSet(), conn} } + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. +func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/vendor/github.com/coreos/go-systemd/test b/vendor/github.com/coreos/go-systemd/test index 6e043658..bc1b9859 100755 --- a/vendor/github.com/coreos/go-systemd/test +++ b/vendor/github.com/coreos/go-systemd/test @@ -1,3 +1,76 @@ -#!/bin/sh -e +#!/bin/bash -e +# +# Run all tests +# ./test +# ./test -v +# +# Run tests for one package +# PKG=./foo ./test +# PKG=bar ./test +# -go test -v ./... +# Invoke ./cover for HTML output +COVER=${COVER:-"-cover"} + +PROJ="go-systemd" +ORG_PATH="github.com/coreos" +REPO_PATH="${ORG_PATH}/${PROJ}" + +# As a convenience, set up a self-contained GOPATH if none set +if [ -z "$GOPATH" ]; then + if [ ! -h gopath/src/${REPO_PATH} ]; then + mkdir -p gopath/src/${ORG_PATH} + ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255 + fi + export GOPATH=${PWD}/gopath + go get -u github.com/godbus/dbus +fi + +TESTABLE="activation journal login1 machine1 unit" +FORMATTABLE="$TESTABLE sdjournal dbus" +if [ -e "/run/systemd/system/" ]; then + TESTABLE="${TESTABLE} sdjournal" + if [ "$EUID" == "0" ]; then + # testing actual systemd behaviour requires root + TESTABLE="${TESTABLE} dbus" + fi +fi + + +# user has not provided PKG override +if [ -z "$PKG" ]; then + TEST=$TESTABLE + FMT=$FORMATTABLE + +# user has provided PKG override +else + # strip out slashes and dots from PKG=./foo/ + TEST=${PKG//\//} + TEST=${TEST//./} + + # only run gofmt on packages provided by user + FMT="$TEST" +fi + +# split TEST into an array and prepend REPO_PATH to each local package +split=(${TEST// / }) +TEST=${split[@]/#/${REPO_PATH}/} + +echo "Running tests..." +go test ${COVER} $@ ${TEST} + +echo "Checking gofmt..." +fmtRes=$(gofmt -l $FMT) +if [ -n "${fmtRes}" ]; then + echo -e "gofmt checking failed:\n${fmtRes}" + exit 255 +fi + +echo "Checking govet..." 
+vetRes=$(go vet $TEST)
+if [ -n "${vetRes}" ]; then
+	echo -e "govet checking failed:\n${vetRes}"
+	exit 255
+fi
+
+echo "Success"
diff --git a/vendor/github.com/coreos/go-systemd/util/util.go b/vendor/github.com/coreos/go-systemd/util/util.go
new file mode 100644
index 00000000..33832a1e
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/util/util.go
@@ -0,0 +1,33 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package util contains utility functions related to systemd that applications
+// can use to check things like whether systemd is running.
+package util
+
+import (
+	"os"
+)
+
+// IsRunningSystemd checks whether the host was booted with systemd as its init
+// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
+// checks whether /run/systemd/system/ exists and is a directory.
+// http://www.freedesktop.org/software/systemd/man/sd_booted.html
+func IsRunningSystemd() bool {
+	fi, err := os.Lstat("/run/systemd/system")
+	if err != nil {
+		return false
+	}
+	return fi.IsDir()
+}
diff --git a/vendor/github.com/docker/containerd/.gitignore b/vendor/github.com/docker/containerd/.gitignore
new file mode 100644
index 00000000..4109a6d5
--- /dev/null
+++ b/vendor/github.com/docker/containerd/.gitignore
@@ -0,0 +1,7 @@
+containerd/containerd
+containerd-shim/containerd-shim
+bin/
+ctr/ctr
+hack/benchmark
+*.exe
+integration-test/test-artifacts
diff --git a/vendor/github.com/docker/containerd/CONTRIBUTING.md b/vendor/github.com/docker/containerd/CONTRIBUTING.md
new file mode 100644
index 00000000..b8a512c3
--- /dev/null
+++ b/vendor/github.com/docker/containerd/CONTRIBUTING.md
@@ -0,0 +1,55 @@
+# Contributing
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/containerd/Dockerfile b/vendor/github.com/docker/containerd/Dockerfile
new file mode 100644
index 00000000..23cf54b2
--- /dev/null
+++ b/vendor/github.com/docker/containerd/Dockerfile
@@ -0,0 +1,51 @@
+FROM debian:jessie
+
+RUN apt-get update && apt-get install -y \
+	build-essential \
+	ca-certificates \
+	curl \
+	git \
+	make \
+	jq \
+	apparmor \
+	libapparmor-dev \
+	--no-install-recommends \
+	&& rm -rf /var/lib/apt/lists/*
+
+# Install Go
+ENV GO_VERSION 1.5.3
+RUN curl -sSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar -v -C /usr/local -xz
+ENV PATH /go/bin:/usr/local/go/bin:$PATH
+ENV GOPATH /go:/go/src/github.com/docker/containerd/vendor
+
+WORKDIR /go/src/github.com/docker/containerd
+
+# install seccomp: the version shipped in trusty is too old
+ENV SECCOMP_VERSION 2.3.0
+RUN set -x \
+	&& export SECCOMP_PATH="$(mktemp -d)" \
+	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
+		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
+	&& ( \
+		cd "$SECCOMP_PATH" \
+		&& ./configure --prefix=/usr/local \
+		&& make \
+		&& make install \
+		&& ldconfig \
+	) \
+	&& rm -rf "$SECCOMP_PATH"
+
+# Install runc
+ENV RUNC_COMMIT d49ece5a83da3dcb820121d6850e2b61bd0a5fbe
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone git://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
+	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
+	&& git checkout -q "$RUNC_COMMIT" \
+	&& make BUILDTAGS="seccomp apparmor selinux" && make install
+
+COPY .
/go/src/github.com/docker/containerd + +WORKDIR /go/src/github.com/docker/containerd + +RUN make all install diff --git a/vendor/github.com/docker/containerd/LICENSE.code b/vendor/github.com/docker/containerd/LICENSE.code new file mode 100644 index 00000000..8f3fee62 --- /dev/null +++ b/vendor/github.com/docker/containerd/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/containerd/LICENSE.docs b/vendor/github.com/docker/containerd/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/vendor/github.com/docker/containerd/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b.
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/containerd/MAINTAINERS b/vendor/github.com/docker/containerd/MAINTAINERS new file mode 100644 index 00000000..77c5fa7b --- /dev/null +++ b/vendor/github.com/docker/containerd/MAINTAINERS @@ -0,0 +1,41 @@ +# Containerd maintainers file +# +# This file describes who runs the docker/containerd project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "crosbymichael", + "tonistiigi", + "mlaventure", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
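As the header comment in this MAINTAINERS file notes, it is plain TOML and can be consumed programmatically with any TOML-compliant parser. A minimal sketch of doing so from Go; the choice of the github.com/BurntSushi/toml library and the struct shape mirroring the [Org] and [people] tables are assumptions for illustration, not anything the file itself prescribes:

```go
// Sketch: list the core maintainers from the MAINTAINERS file.
// Assumes github.com/BurntSushi/toml; any TOML-compliant parser works.
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// maintainers mirrors the [Org] and [people] tables of the file.
type maintainers struct {
	Org map[string]struct {
		People []string `toml:"people"`
	} `toml:"Org"`
	People map[string]struct {
		Name   string `toml:"Name"`
		Email  string `toml:"Email"`
		GitHub string `toml:"GitHub"`
	} `toml:"people"`
}

func main() {
	var m maintainers
	if _, err := toml.DecodeFile("MAINTAINERS", &m); err != nil {
		panic(err)
	}
	// Resolve the canonical keys under "Core maintainers" against
	// the [people] reference list, as the file's comments describe.
	for _, key := range m.Org["Core maintainers"].People {
		p := m.People[key]
		fmt.Printf("%s <%s> (@%s)\n", p.Name, p.Email, p.GitHub)
	}
}
```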
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.mlaventure] + Name = "Kenfe-Mickaël Laventure" + Email = "mickael.laventure@docker.com" + GitHub = "mlaventure" + diff --git a/vendor/github.com/docker/containerd/Makefile b/vendor/github.com/docker/containerd/Makefile new file mode 100644 index 00000000..81267b4e --- /dev/null +++ b/vendor/github.com/docker/containerd/Makefile @@ -0,0 +1,97 @@ +BUILDTAGS= + +PROJECT=github.com/docker/containerd + +GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true) +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2> /dev/null) + +LDFLAGS := -X github.com/docker/containerd.GitCommit=${GIT_COMMIT} ${LDFLAGS} + +TEST_TIMEOUT ?= 5m +TEST_SUITE_TIMEOUT ?= 10m + +# if this session isn't interactive, then we don't want to allocate a +# TTY, which would fail, but if it is interactive, we do want to attach +# so that the user can send e.g. ^C through. +INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +TEST_ARTIFACTS_DIR := integration-test/test-artifacts +BUNDLE_ARCHIVES_DIR := $(TEST_ARTIFACTS_DIR)/archives + +DOCKER_IMAGE := containerd-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_RUN := docker run --privileged --rm -i $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + + +export GOPATH:=$(CURDIR)/vendor:$(GOPATH) + +all: client daemon shim + +static: client-static daemon-static shim-static + +bin: + mkdir -p bin/ + +clean: + rm -rf bin + +client: bin + cd ctr && go build -ldflags "${LDFLAGS}" -o ../bin/ctr + +client-static: + cd ctr && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/ctr + +daemon: bin + cd containerd && go build -ldflags "${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd + +daemon-static: + cd containerd && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd + +shim: bin + cd containerd-shim && go build -tags "$(BUILDTAGS)" -ldflags "-w" -o ../bin/containerd-shim + +shim-static: + cd containerd-shim && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd-shim + +$(BUNDLE_ARCHIVES_DIR)/busybox.tar: + @mkdir -p $(BUNDLE_ARCHIVES_DIR) + curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.11/rootfs.tar' -o $(BUNDLE_ARCHIVES_DIR)/busybox.tar + +bundles-rootfs: $(BUNDLE_ARCHIVES_DIR)/busybox.tar + +dbuild: $(BUNDLE_ARCHIVES_DIR)/busybox.tar + @docker build --rm --force-rm -t "$(DOCKER_IMAGE)" . + +dtest: dbuild + $(DOCKER_RUN) make test + +install: + cp bin/* /usr/local/bin/ + +protoc: + protoc -I ./api/grpc/types ./api/grpc/types/api.proto --go_out=plugins=grpc:api/grpc/types + +fmt: + @gofmt -s -l . | grep -v vendor | grep -v .pb. | tee /dev/stderr + +lint: + @golint ./... | grep -v vendor | grep -v .pb. | tee /dev/stderr + +shell: dbuild + $(DOCKER_RUN) bash + +test: validate + go test -v $(shell go list ./... 
| grep -v /vendor | grep -v /integration-test) +ifneq ($(wildcard /.dockerenv), ) + $(MAKE) install bundles-rootfs + cd integration-test ; \ + go test -check.v -check.timeout=$(TEST_TIMEOUT) -timeout=$(TEST_SUITE_TIMEOUT) $(TESTFLAGS) github.com/docker/containerd/integration-test && \ + go test -containerd.shim="" -check.v -check.timeout=$(TEST_TIMEOUT) -timeout=$(TEST_SUITE_TIMEOUT) $(TESTFLAGS) github.com/docker/containerd/integration-test +endif + +validate: fmt + +uninstall: + $(foreach file,containerd containerd-shim ctr,rm /usr/local/bin/$(file);) diff --git a/vendor/github.com/docker/containerd/NOTICE b/vendor/github.com/docker/containerd/NOTICE new file mode 100644 index 00000000..8915f027 --- /dev/null +++ b/vendor/github.com/docker/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/containerd/README.md b/vendor/github.com/docker/containerd/README.md new file mode 100644 index 00000000..6413bdc4 --- /dev/null +++ b/vendor/github.com/docker/containerd/README.md @@ -0,0 +1,72 @@ +# containerd + +containerd is a daemon to control runC, built for performance and density. +containerd leverages runC's advanced features such as seccomp and user namespace support as well +as checkpoint and restore for cloning and live migration of containers. + +## Getting started + +The easiest way to start using containerd is to download binaries from the [releases page](https://github.com/docker/containerd/releases). + +The included `ctr` command-line tool allows you to interact with the containerd daemon: + +``` +$ sudo ctr containers start redis /containers/redis +$ sudo ctr containers list +ID PATH STATUS PROCESSES +redis /containers/redis running 14063 +``` + +`/containers/redis` is the path to an OCI bundle. [See the docs for more information.](docs/bundle.md) + +## Docs + + * [Client CLI reference (`ctr`)](docs/cli.md) + * [Daemon CLI reference (`containerd`)](docs/daemon.md) + * [Creating OCI bundles](docs/bundle.md) + * [containerd changes to the bundle](docs/bundle-changes.md) + * [Attaching to STDIO or TTY](docs/attach.md) + * [Telemetry and metrics](docs/telemetry.md) + +All documentation is contained in the `/docs` directory in this repository. + +## Building + +You will need to make sure that you have Go installed on your system and the containerd repository is cloned +in your `$GOPATH`. You will also need to make sure that you have all the dependencies cloned. +Currently, contributing to containerd is not recommended for first-time contributors, as many dependencies are not vendored and +work is being completed at a high rate. + +After that, just run `make` and the binaries for the daemon and client will be located in the `bin/` directory. + +## Performance + +Starting 1000 containers concurrently runs at 126-140 containers per second.
+ +Overall start times: + +``` +[containerd] 2015/12/04 15:00:54 count: 1000 +[containerd] 2015/12/04 14:59:54 min: 23ms +[containerd] 2015/12/04 14:59:54 max: 355ms +[containerd] 2015/12/04 14:59:54 mean: 78ms +[containerd] 2015/12/04 14:59:54 stddev: 34ms +[containerd] 2015/12/04 14:59:54 median: 73ms +[containerd] 2015/12/04 14:59:54 75%: 91ms +[containerd] 2015/12/04 14:59:54 95%: 123ms +[containerd] 2015/12/04 14:59:54 99%: 287ms +[containerd] 2015/12/04 14:59:54 99.9%: 355ms +``` + +## Roadmap + +The current roadmap and milestones for alpha and beta completion are in the GitHub issues on this repository. Please refer to these issues for what is being worked on and completed for the various stages of development. + +## Copyright and license + +Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code +is released under the Apache 2.0 license. The README.md file, and files in the +"docs" folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 +International License under the terms and conditions set forth in the file +"LICENSE.docs". You may obtain a duplicate copy of the same license, titled +CC-BY-SA-4.0, at http://creativecommons.org/licenses/by-sa/4.0/. diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server.go b/vendor/github.com/docker/containerd/api/grpc/server/server.go new file mode 100644 index 00000000..0fef7ba7 --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/server/server.go @@ -0,0 +1,462 @@ +package server + +import ( + "bufio" + "errors" + "fmt" + "os" + "strconv" + "strings" + "syscall" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + "github.com/docker/containerd" + "github.com/docker/containerd/api/grpc/types" + "github.com/docker/containerd/runtime" + "github.com/docker/containerd/supervisor" + "golang.org/x/net/context" +) + +type apiServer struct { + sv *supervisor.Supervisor +} + +// NewServer returns a grpc server instance +func NewServer(sv *supervisor.Supervisor) types.APIServer { + return &apiServer{ + sv: sv, + } +} + +func (s *apiServer) GetServerVersion(ctx context.Context, c *types.GetServerVersionRequest) (*types.GetServerVersionResponse, error) { + return &types.GetServerVersionResponse{ + Major: containerd.VersionMajor, + Minor: containerd.VersionMinor, + Patch: containerd.VersionPatch, + Revision: containerd.GitCommit, + }, nil +} + +func (s *apiServer) CreateContainer(ctx context.Context, c *types.CreateContainerRequest) (*types.CreateContainerResponse, error) { + if c.BundlePath == "" { + return nil, errors.New("empty bundle path") + } + e := &supervisor.StartTask{} + e.ID = c.Id + e.BundlePath = c.BundlePath + e.Stdin = c.Stdin + e.Stdout = c.Stdout + e.Stderr = c.Stderr + e.Labels = c.Labels + e.NoPivotRoot = c.NoPivotRoot + e.StartResponse = make(chan supervisor.StartResponse, 1) + if c.Checkpoint != "" { + e.Checkpoint = &runtime.Checkpoint{ + Name: c.Checkpoint, + } + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + r := <-e.StartResponse + apiC, err := createAPIContainer(r.Container, false) + if err != nil { + return nil, err + } + return &types.CreateContainerResponse{ + Container: apiC, + }, nil +} + +func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpointRequest) (*types.CreateCheckpointResponse, error) { + e := &supervisor.CreateCheckpointTask{} + e.ID = r.Id + e.Checkpoint = &runtime.Checkpoint{ + Name: r.Checkpoint.Name, + Exit: r.Checkpoint.Exit, + Tcp: r.Checkpoint.Tcp, + UnixSockets:
r.Checkpoint.UnixSockets, + Shell: r.Checkpoint.Shell, + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.CreateCheckpointResponse{}, nil +} + +func (s *apiServer) DeleteCheckpoint(ctx context.Context, r *types.DeleteCheckpointRequest) (*types.DeleteCheckpointResponse, error) { + if r.Name == "" { + return nil, errors.New("checkpoint name cannot be empty") + } + e := &supervisor.DeleteCheckpointTask{} + e.ID = r.Id + e.Checkpoint = &runtime.Checkpoint{ + Name: r.Name, + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.DeleteCheckpointResponse{}, nil +} + +func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointRequest) (*types.ListCheckpointResponse, error) { + e := &supervisor.GetContainersTask{} + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + var container runtime.Container + for _, c := range e.Containers { + if c.ID() == r.Id { + container = c + break + } + } + if container == nil { + return nil, grpc.Errorf(codes.NotFound, "no such containers") + } + var out []*types.Checkpoint + checkpoints, err := container.Checkpoints() + if err != nil { + return nil, err + } + for _, c := range checkpoints { + out = append(out, &types.Checkpoint{ + Name: c.Name, + Tcp: c.Tcp, + Shell: c.Shell, + UnixSockets: c.UnixSockets, + // TODO: figure out timestamp + //Timestamp: c.Timestamp, + }) + } + return &types.ListCheckpointResponse{Checkpoints: out}, nil +} + +func (s *apiServer) Signal(ctx context.Context, r *types.SignalRequest) (*types.SignalResponse, error) { + e := &supervisor.SignalTask{} + e.ID = r.Id + e.PID = r.Pid + e.Signal = syscall.Signal(int(r.Signal)) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.SignalResponse{}, nil +} + +func (s *apiServer) State(ctx context.Context, r *types.StateRequest) (*types.StateResponse, error) { + e := &supervisor.GetContainersTask{} + e.ID = r.Id + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + m := s.sv.Machine() + state := &types.StateResponse{ + Machine: &types.Machine{ + Cpus: uint32(m.Cpus), + Memory: uint64(m.Memory), + }, + } + for _, c := range e.Containers { + apiC, err := createAPIContainer(c, true) + if err != nil { + return nil, err + } + state.Containers = append(state.Containers, apiC) + } + return state, nil +} + +func createAPIContainer(c runtime.Container, getPids bool) (*types.Container, error) { + processes, err := c.Processes() + if err != nil { + return nil, grpc.Errorf(codes.Internal, "get processes for container: "+err.Error()) + } + var procs []*types.Process + for _, p := range processes { + oldProc := p.Spec() + stdio := p.Stdio() + proc := &types.Process{ + Pid: p.ID(), + SystemPid: uint32(p.SystemPid()), + Terminal: oldProc.Terminal, + Args: oldProc.Args, + Env: oldProc.Env, + Cwd: oldProc.Cwd, + Stdin: stdio.Stdin, + Stdout: stdio.Stdout, + Stderr: stdio.Stderr, + } + proc.User = &types.User{ + Uid: oldProc.User.UID, + Gid: oldProc.User.GID, + AdditionalGids: oldProc.User.AdditionalGids, + } + proc.Capabilities = oldProc.Capabilities + proc.ApparmorProfile = oldProc.ApparmorProfile + proc.SelinuxLabel = oldProc.SelinuxLabel + proc.NoNewPrivileges = oldProc.NoNewPrivileges + for _, rl := range oldProc.Rlimits { + proc.Rlimits = append(proc.Rlimits, &types.Rlimit{ + Type: rl.Type, + Soft: rl.Soft, + Hard: rl.Hard, + }) + } + procs = append(procs, proc) + } + var pids []int + state := 
c.State() + if getPids && (state == runtime.Running || state == runtime.Paused) { + if pids, err = c.Pids(); err != nil { + return nil, grpc.Errorf(codes.Internal, "get all pids for container: "+err.Error()) + } + } + return &types.Container{ + Id: c.ID(), + BundlePath: c.Path(), + Processes: procs, + Labels: c.Labels(), + Status: string(state), + Pids: toUint32(pids), + Runtime: c.Runtime(), + }, nil +} + +func toUint32(its []int) []uint32 { + o := []uint32{} + for _, i := range its { + o = append(o, uint32(i)) + } + return o +} + +func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) { + e := &supervisor.UpdateTask{} + e.ID = r.Id + e.State = runtime.State(r.Status) + if r.Resources != nil { + rs := r.Resources + e.Resources = &runtime.Resource{} + if rs.CpuShares != 0 { + e.Resources.CPUShares = int64(rs.CpuShares) + } + if rs.BlkioWeight != 0 { + e.Resources.BlkioWeight = uint16(rs.BlkioWeight) + } + if rs.CpuPeriod != 0 { + e.Resources.CPUPeriod = int64(rs.CpuPeriod) + } + if rs.CpuQuota != 0 { + e.Resources.CPUQuota = int64(rs.CpuQuota) + } + if rs.CpusetCpus != "" { + e.Resources.CpusetCpus = rs.CpusetCpus + } + if rs.CpusetMems != "" { + e.Resources.CpusetMems = rs.CpusetMems + } + if rs.KernelMemoryLimit != 0 { + e.Resources.KernelMemory = int64(rs.KernelMemoryLimit) + } + if rs.MemoryLimit != 0 { + e.Resources.Memory = int64(rs.MemoryLimit) + } + if rs.MemoryReservation != 0 { + e.Resources.MemoryReservation = int64(rs.MemoryReservation) + } + if rs.MemorySwap != 0 { + e.Resources.MemorySwap = int64(rs.MemorySwap) + } + } + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.UpdateContainerResponse{}, nil +} + +func (s *apiServer) UpdateProcess(ctx context.Context, r *types.UpdateProcessRequest) (*types.UpdateProcessResponse, error) { + e := &supervisor.UpdateProcessTask{} + e.ID = r.Id + e.PID = r.Pid + e.Height = int(r.Height) + e.Width = int(r.Width) + e.CloseStdin = r.CloseStdin + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + return &types.UpdateProcessResponse{}, nil +} + +func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error { + t := time.Time{} + if r.Timestamp != 0 { + t = time.Unix(int64(r.Timestamp), 0) + } + events := s.sv.Events(t) + defer s.sv.Unsubscribe(events) + for e := range events { + if err := stream.Send(&types.Event{ + Id: e.ID, + Type: e.Type, + Timestamp: uint64(e.Timestamp.Unix()), + Pid: e.PID, + Status: uint32(e.Status), + }); err != nil { + return err + } + } + return nil +} + +func convertToPb(st *runtime.Stat) *types.StatsResponse { + pbSt := &types.StatsResponse{ + Timestamp: uint64(st.Timestamp.Unix()), + CgroupStats: &types.CgroupStats{}, + } + systemUsage, _ := getSystemCPUUsage() + pbSt.CgroupStats.CpuStats = &types.CpuStats{ + CpuUsage: &types.CpuUsage{ + TotalUsage: st.Cpu.Usage.Total, + PercpuUsage: st.Cpu.Usage.Percpu, + UsageInKernelmode: st.Cpu.Usage.Kernel, + UsageInUsermode: st.Cpu.Usage.User, + }, + ThrottlingData: &types.ThrottlingData{ + Periods: st.Cpu.Throttling.Periods, + ThrottledPeriods: st.Cpu.Throttling.ThrottledPeriods, + ThrottledTime: st.Cpu.Throttling.ThrottledTime, + }, + SystemUsage: systemUsage, + } + pbSt.CgroupStats.MemoryStats = &types.MemoryStats{ + Cache: st.Memory.Cache, + Usage: &types.MemoryData{ + Usage: st.Memory.Usage.Usage, + MaxUsage: st.Memory.Usage.Max, + Failcnt: st.Memory.Usage.Failcnt, + Limit: st.Memory.Usage.Limit, + 
}, + SwapUsage: &types.MemoryData{ + Usage: st.Memory.Swap.Usage, + MaxUsage: st.Memory.Swap.Max, + Failcnt: st.Memory.Swap.Failcnt, + Limit: st.Memory.Swap.Limit, + }, + KernelUsage: &types.MemoryData{ + Usage: st.Memory.Kernel.Usage, + MaxUsage: st.Memory.Kernel.Max, + Failcnt: st.Memory.Kernel.Failcnt, + Limit: st.Memory.Kernel.Limit, + }, + Stats: st.Memory.Raw, + } + pbSt.CgroupStats.BlkioStats = &types.BlkioStats{ + IoServiceBytesRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceBytesRecursive), + IoServicedRecursive: convertBlkioEntryToPb(st.Blkio.IoServicedRecursive), + IoQueuedRecursive: convertBlkioEntryToPb(st.Blkio.IoQueuedRecursive), + IoServiceTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceTimeRecursive), + IoWaitTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoWaitTimeRecursive), + IoMergedRecursive: convertBlkioEntryToPb(st.Blkio.IoMergedRecursive), + IoTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoTimeRecursive), + SectorsRecursive: convertBlkioEntryToPb(st.Blkio.SectorsRecursive), + } + pbSt.CgroupStats.HugetlbStats = make(map[string]*types.HugetlbStats) + for k, st := range st.Hugetlb { + pbSt.CgroupStats.HugetlbStats[k] = &types.HugetlbStats{ + Usage: st.Usage, + MaxUsage: st.Max, + Failcnt: st.Failcnt, + } + } + pbSt.CgroupStats.PidsStats = &types.PidsStats{ + Current: st.Pids.Current, + Limit: st.Pids.Limit, + } + return pbSt +} + +func convertBlkioEntryToPb(b []runtime.BlkioEntry) []*types.BlkioStatsEntry { + var pbEs []*types.BlkioStatsEntry + for _, e := range b { + pbEs = append(pbEs, &types.BlkioStatsEntry{ + Major: e.Major, + Minor: e.Minor, + Op: e.Op, + Value: e.Value, + }) + } + return pbEs +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat as exposed by the Linux proc filesystem. Looks for the cpu + statistics line and then sums up the first seven fields + provided. See `man 5 proc` for details on specific field + information.
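+//
+// For example, given a cpu line such as
+//
+//	cpu 2255 34 2290 22625563 6290 127 456
+//
+// the first seven values (user, nice, system, idle, iowait, irq,
+// softirq) are summed and converted from clock ticks to nanoseconds:
+// total * 1e9 / clockTicksPerSecond. (The sample line is illustrative,
+// taken from the kernel's proc documentation, not from this codebase.)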
+func getSystemCPUUsage() (uint64, error) { + var line string + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + bufReader := bufio.NewReaderSize(nil, 128) + defer func() { + bufReader.Reset(nil) + f.Close() + }() + bufReader.Reset(f) + err = nil + for err == nil { + line, err = bufReader.ReadString('\n') + if err != nil { + break + } + parts := strings.Fields(line) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("bad format of cpu stats") + } + var totalClockTicks uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("error parsing cpu stats") + } + totalClockTicks += v + } + return (totalClockTicks * nanoSecondsPerSecond) / + clockTicksPerSecond, nil + } + } + return 0, fmt.Errorf("bad stats format") +} + +func (s *apiServer) Stats(ctx context.Context, r *types.StatsRequest) (*types.StatsResponse, error) { + e := &supervisor.StatsTask{} + e.ID = r.Id + e.Stat = make(chan *runtime.Stat, 1) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + stats := <-e.Stat + t := convertToPb(stats) + return t, nil +} diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go b/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go new file mode 100644 index 00000000..1051f1f0 --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go @@ -0,0 +1,59 @@ +package server + +import ( + "fmt" + + "github.com/docker/containerd/api/grpc/types" + "github.com/docker/containerd/specs" + "github.com/docker/containerd/supervisor" + "github.com/opencontainers/runc/libcontainer/system" + ocs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +var clockTicksPerSecond = uint64(system.GetClockTicks()) + +func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) { + process := &specs.ProcessSpec{ + Terminal: r.Terminal, + Args: r.Args, + Env: r.Env, + Cwd: r.Cwd, + } + process.User = ocs.User{ + UID: r.User.Uid, + GID: r.User.Gid, + AdditionalGids: r.User.AdditionalGids, + } + process.Capabilities = r.Capabilities + process.ApparmorProfile = r.ApparmorProfile + process.SelinuxLabel = r.SelinuxLabel + process.NoNewPrivileges = r.NoNewPrivileges + for _, rl := range r.Rlimits { + process.Rlimits = append(process.Rlimits, ocs.Rlimit{ + Type: rl.Type, + Soft: rl.Soft, + Hard: rl.Hard, + }) + } + if r.Id == "" { + return nil, fmt.Errorf("container id cannot be empty") + } + if r.Pid == "" { + return nil, fmt.Errorf("process id cannot be empty") + } + e := &supervisor.AddProcessTask{} + e.ID = r.Id + e.PID = r.Pid + e.ProcessSpec = process + e.Stdin = r.Stdin + e.Stdout = r.Stdout + e.Stderr = r.Stderr + e.StartResponse = make(chan supervisor.StartResponse, 1) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + <-e.StartResponse + return &types.AddProcessResponse{}, nil +} diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go b/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go new file mode 100644 index 00000000..bf64b9a2 --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go @@ -0,0 +1,14 @@ +package server + +import ( + "errors" + + "github.com/docker/containerd/api/grpc/types" + "golang.org/x/net/context" +) + +var clockTicksPerSecond uint64 + +func (s *apiServer) AddProcess(ctx context.Context, r 
*types.AddProcessRequest) (*types.AddProcessResponse, error) { + return &types.AddProcessResponse{}, errors.New("apiServer AddProcess() not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go b/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go new file mode 100644 index 00000000..d0851d43 --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go @@ -0,0 +1,1430 @@ +// Code generated by protoc-gen-go. +// source: api.proto +// DO NOT EDIT! + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + api.proto + +It has these top-level messages: + GetServerVersionRequest + GetServerVersionResponse + UpdateProcessRequest + UpdateProcessResponse + CreateContainerRequest + CreateContainerResponse + SignalRequest + SignalResponse + AddProcessRequest + Rlimit + User + AddProcessResponse + CreateCheckpointRequest + CreateCheckpointResponse + DeleteCheckpointRequest + DeleteCheckpointResponse + ListCheckpointRequest + Checkpoint + ListCheckpointResponse + StateRequest + ContainerState + Process + Container + Machine + StateResponse + UpdateContainerRequest + UpdateResource + UpdateContainerResponse + EventsRequest + Event + NetworkStats + CpuUsage + ThrottlingData + CpuStats + PidsStats + MemoryData + MemoryStats + BlkioStatsEntry + BlkioStats + HugetlbStats + CgroupStats + StatsResponse + StatsRequest +*/ +package types + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 + +type GetServerVersionRequest struct { +} + +func (m *GetServerVersionRequest) Reset() { *m = GetServerVersionRequest{} } +func (m *GetServerVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetServerVersionRequest) ProtoMessage() {} +func (*GetServerVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type GetServerVersionResponse struct { + Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + Revision string `protobuf:"bytes,4,opt,name=revision" json:"revision,omitempty"` +} + +func (m *GetServerVersionResponse) Reset() { *m = GetServerVersionResponse{} } +func (m *GetServerVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetServerVersionResponse) ProtoMessage() {} +func (*GetServerVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type UpdateProcessRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + CloseStdin bool `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"` + Width uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"` + Height uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"` +} + +func (m *UpdateProcessRequest) Reset() { *m = UpdateProcessRequest{} } +func (m *UpdateProcessRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateProcessRequest) ProtoMessage() {} +func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type UpdateProcessResponse struct { +} + +func (m *UpdateProcessResponse) Reset() { *m = UpdateProcessResponse{} } +func (m *UpdateProcessResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateProcessResponse) ProtoMessage() {} +func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type CreateContainerRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` + Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"` + Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"` + NoPivotRoot bool `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"` +} + +func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } +func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*CreateContainerRequest) ProtoMessage() {} +func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type CreateContainerResponse struct { + Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"` +} + +func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } +func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*CreateContainerResponse) ProtoMessage() {} +func (*CreateContainerResponse) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateContainerResponse) GetContainer() *Container { + if m != nil { + return m.Container + } + return nil +} + +type SignalRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"` +} + +func (m *SignalRequest) Reset() { *m = SignalRequest{} } +func (m *SignalRequest) String() string { return proto.CompactTextString(m) } +func (*SignalRequest) ProtoMessage() {} +func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type SignalResponse struct { +} + +func (m *SignalResponse) Reset() { *m = SignalResponse{} } +func (m *SignalResponse) String() string { return proto.CompactTextString(m) } +func (*SignalResponse) ProtoMessage() {} +func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type AddProcessRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` + User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` + Pid string `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"` + Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` + Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` + ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` + SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` + NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` + Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` +} + +func (m *AddProcessRequest) Reset() { *m = AddProcessRequest{} } +func (m *AddProcessRequest) String() string { return proto.CompactTextString(m) } +func (*AddProcessRequest) ProtoMessage() {} +func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *AddProcessRequest) GetUser() *User { + if m != nil { + return m.User + } + return nil +} + +func (m *AddProcessRequest) GetRlimits() []*Rlimit { + if m != nil { + return m.Rlimits + } + return nil +} + +type Rlimit struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Soft uint64 `protobuf:"varint,2,opt,name=soft" json:"soft,omitempty"` + Hard uint64 `protobuf:"varint,3,opt,name=hard" json:"hard,omitempty"` +} + +func (m *Rlimit) Reset() { *m = Rlimit{} } +func (m *Rlimit) String() string { return proto.CompactTextString(m) } +func (*Rlimit) ProtoMessage() {} +func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type User struct { + Uid uint32 `protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"` + AdditionalGids []uint32 `protobuf:"varint,3,rep,name=additionalGids" json:"additionalGids,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) 
String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type AddProcessResponse struct { +} + +func (m *AddProcessResponse) Reset() { *m = AddProcessResponse{} } +func (m *AddProcessResponse) String() string { return proto.CompactTextString(m) } +func (*AddProcessResponse) ProtoMessage() {} +func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +type CreateCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"` +} + +func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} } +func (m *CreateCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCheckpointRequest) ProtoMessage() {} +func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint { + if m != nil { + return m.Checkpoint + } + return nil +} + +type CreateCheckpointResponse struct { +} + +func (m *CreateCheckpointResponse) Reset() { *m = CreateCheckpointResponse{} } +func (m *CreateCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*CreateCheckpointResponse) ProtoMessage() {} +func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +type DeleteCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} } +func (m *DeleteCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteCheckpointRequest) ProtoMessage() {} +func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +type DeleteCheckpointResponse struct { +} + +func (m *DeleteCheckpointResponse) Reset() { *m = DeleteCheckpointResponse{} } +func (m *DeleteCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteCheckpointResponse) ProtoMessage() {} +func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type ListCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *ListCheckpointRequest) Reset() { *m = ListCheckpointRequest{} } +func (m *ListCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*ListCheckpointRequest) ProtoMessage() {} +func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +type Checkpoint struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Exit bool `protobuf:"varint,2,opt,name=exit" json:"exit,omitempty"` + Tcp bool `protobuf:"varint,3,opt,name=tcp" json:"tcp,omitempty"` + UnixSockets bool `protobuf:"varint,4,opt,name=unixSockets" json:"unixSockets,omitempty"` + Shell bool `protobuf:"varint,5,opt,name=shell" json:"shell,omitempty"` +} + +func (m *Checkpoint) Reset() { *m = Checkpoint{} } +func (m *Checkpoint) String() string { return proto.CompactTextString(m) } +func (*Checkpoint) ProtoMessage() {} +func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +type ListCheckpointResponse struct { + Checkpoints []*Checkpoint 
`protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` +} + +func (m *ListCheckpointResponse) Reset() { *m = ListCheckpointResponse{} } +func (m *ListCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*ListCheckpointResponse) ProtoMessage() {} +func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint { + if m != nil { + return m.Checkpoints + } + return nil +} + +type StateRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *StateRequest) Reset() { *m = StateRequest{} } +func (m *StateRequest) String() string { return proto.CompactTextString(m) } +func (*StateRequest) ProtoMessage() {} +func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +type ContainerState struct { + Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` +} + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (m *ContainerState) String() string { return proto.CompactTextString(m) } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +type Process struct { + Pid string `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` + User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` + SystemPid uint32 `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"` + Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` + Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` + ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` + SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` + NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` + Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` +} + +func (m *Process) Reset() { *m = Process{} } +func (m *Process) String() string { return proto.CompactTextString(m) } +func (*Process) ProtoMessage() {} +func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *Process) GetUser() *User { + if m != nil { + return m.User + } + return nil +} + +func (m *Process) GetRlimits() []*Rlimit { + if m != nil { + return m.Rlimits + } + return nil +} + +type Container struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` + Processes []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"` + Status string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` + Labels []string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"` + Pids []uint32 `protobuf:"varint,6,rep,name=pids" json:"pids,omitempty"` + Runtime string `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"` +} + +func 
(m *Container) Reset() { *m = Container{} } +func (m *Container) String() string { return proto.CompactTextString(m) } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *Container) GetProcesses() []*Process { + if m != nil { + return m.Processes + } + return nil +} + +// Machine is information about machine on which containerd is run +type Machine struct { + Cpus uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"` + Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"` +} + +func (m *Machine) Reset() { *m = Machine{} } +func (m *Machine) String() string { return proto.CompactTextString(m) } +func (*Machine) ProtoMessage() {} +func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +// StateResponse is information about containerd daemon +type StateResponse struct { + Containers []*Container `protobuf:"bytes,1,rep,name=containers" json:"containers,omitempty"` + Machine *Machine `protobuf:"bytes,2,opt,name=machine" json:"machine,omitempty"` +} + +func (m *StateResponse) Reset() { *m = StateResponse{} } +func (m *StateResponse) String() string { return proto.CompactTextString(m) } +func (*StateResponse) ProtoMessage() {} +func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *StateResponse) GetContainers() []*Container { + if m != nil { + return m.Containers + } + return nil +} + +func (m *StateResponse) GetMachine() *Machine { + if m != nil { + return m.Machine + } + return nil +} + +type UpdateContainerRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` + Resources *UpdateResource `protobuf:"bytes,4,opt,name=resources" json:"resources,omitempty"` +} + +func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } +func (m *UpdateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateContainerRequest) ProtoMessage() {} +func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *UpdateContainerRequest) GetResources() *UpdateResource { + if m != nil { + return m.Resources + } + return nil +} + +type UpdateResource struct { + BlkioWeight uint32 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` + CpuShares uint32 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` + CpuPeriod uint32 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` + CpuQuota uint32 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` + CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` + CpusetMems string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` + MemoryLimit uint32 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` + MemorySwap uint32 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` + MemoryReservation uint32 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` + KernelMemoryLimit uint32 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` +} + +func (m *UpdateResource) Reset() { *m = UpdateResource{} } +func (m *UpdateResource) String() string { return proto.CompactTextString(m) } +func (*UpdateResource) ProtoMessage() {} +func 
(*UpdateResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +type UpdateContainerResponse struct { +} + +func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } +func (m *UpdateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateContainerResponse) ProtoMessage() {} +func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +type EventsRequest struct { + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *EventsRequest) Reset() { *m = EventsRequest{} } +func (m *EventsRequest) String() string { return proto.CompactTextString(m) } +func (*EventsRequest) ProtoMessage() {} +func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"` + Pid string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"` + Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type NetworkStats struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes" json:"rx_bytes,omitempty"` + Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets" json:"rx_Packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=Rx_errors" json:"Rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=Rx_dropped" json:"Rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=Tx_bytes" json:"Tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=Tx_packets" json:"Tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=Tx_errors" json:"Tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=Tx_dropped" json:"Tx_dropped,omitempty"` +} + +func (m *NetworkStats) Reset() { *m = NetworkStats{} } +func (m *NetworkStats) String() string { return proto.CompactTextString(m) } +func (*NetworkStats) ProtoMessage() {} +func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type CpuUsage struct { + TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage" json:"total_usage,omitempty"` + PercpuUsage []uint64 `protobuf:"varint,2,rep,name=percpu_usage" json:"percpu_usage,omitempty"` + UsageInKernelmode uint64 `protobuf:"varint,3,opt,name=usage_in_kernelmode" json:"usage_in_kernelmode,omitempty"` + UsageInUsermode uint64 `protobuf:"varint,4,opt,name=usage_in_usermode" json:"usage_in_usermode,omitempty"` +} + +func (m *CpuUsage) Reset() { *m = CpuUsage{} } +func (m *CpuUsage) String() string { return proto.CompactTextString(m) } +func (*CpuUsage) ProtoMessage() {} +func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +type ThrottlingData struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time" json:"throttled_time,omitempty"` +} + +func (m 
*ThrottlingData) Reset() { *m = ThrottlingData{} } +func (m *ThrottlingData) String() string { return proto.CompactTextString(m) } +func (*ThrottlingData) ProtoMessage() {} +func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +type CpuStats struct { + CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage" json:"cpu_usage,omitempty"` + ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data" json:"throttling_data,omitempty"` + SystemUsage uint64 `protobuf:"varint,3,opt,name=system_usage" json:"system_usage,omitempty"` +} + +func (m *CpuStats) Reset() { *m = CpuStats{} } +func (m *CpuStats) String() string { return proto.CompactTextString(m) } +func (*CpuStats) ProtoMessage() {} +func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *CpuStats) GetCpuUsage() *CpuUsage { + if m != nil { + return m.CpuUsage + } + return nil +} + +func (m *CpuStats) GetThrottlingData() *ThrottlingData { + if m != nil { + return m.ThrottlingData + } + return nil +} + +type PidsStats struct { + Current uint64 `protobuf:"varint,1,opt,name=current" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` +} + +func (m *PidsStats) Reset() { *m = PidsStats{} } +func (m *PidsStats) String() string { return proto.CompactTextString(m) } +func (*PidsStats) ProtoMessage() {} +func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +type MemoryData struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" json:"max_usage,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` + Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` +} + +func (m *MemoryData) Reset() { *m = MemoryData{} } +func (m *MemoryData) String() string { return proto.CompactTextString(m) } +func (*MemoryData) ProtoMessage() {} +func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +type MemoryStats struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"` + Usage *MemoryData `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"` + SwapUsage *MemoryData `protobuf:"bytes,3,opt,name=swap_usage" json:"swap_usage,omitempty"` + KernelUsage *MemoryData `protobuf:"bytes,4,opt,name=kernel_usage" json:"kernel_usage,omitempty"` + Stats map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *MemoryStats) Reset() { *m = MemoryStats{} } +func (m *MemoryStats) String() string { return proto.CompactTextString(m) } +func (*MemoryStats) ProtoMessage() {} +func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *MemoryStats) GetUsage() *MemoryData { + if m != nil { + return m.Usage + } + return nil +} + +func (m *MemoryStats) GetSwapUsage() *MemoryData { + if m != nil { + return m.SwapUsage + } + return nil +} + +func (m *MemoryStats) GetKernelUsage() *MemoryData { + if m != nil { + return m.KernelUsage + } + return nil +} + +func (m *MemoryStats) GetStats() map[string]uint64 { + if m != nil { + return m.Stats + } + return nil +} + +type BlkioStatsEntry struct { + Major uint64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Op string 
`protobuf:"bytes,3,opt,name=op" json:"op,omitempty"` + Value uint64 `protobuf:"varint,4,opt,name=value" json:"value,omitempty"` +} + +func (m *BlkioStatsEntry) Reset() { *m = BlkioStatsEntry{} } +func (m *BlkioStatsEntry) String() string { return proto.CompactTextString(m) } +func (*BlkioStatsEntry) ProtoMessage() {} +func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type BlkioStats struct { + IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive" json:"sectors_recursive,omitempty"` +} + +func (m *BlkioStats) Reset() { *m = BlkioStats{} } +func (m *BlkioStats) String() string { return proto.CompactTextString(m) } +func (*BlkioStats) ProtoMessage() {} +func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServiceBytesRecursive + } + return nil +} + +func (m *BlkioStats) GetIoServicedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServicedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoQueuedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoQueuedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoServiceTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServiceTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetIoWaitTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoWaitTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetIoMergedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoMergedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry { + if m != nil { + return m.SectorsRecursive + } + return nil +} + +type HugetlbStats struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" json:"max_usage,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` + Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` +} + +func (m *HugetlbStats) Reset() { *m = HugetlbStats{} } +func (m *HugetlbStats) String() string { return proto.CompactTextString(m) } +func (*HugetlbStats) ProtoMessage() {} +func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type CgroupStats struct { + CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats" 
json:"cpu_stats,omitempty"` + MemoryStats *MemoryStats `protobuf:"bytes,2,opt,name=memory_stats" json:"memory_stats,omitempty"` + BlkioStats *BlkioStats `protobuf:"bytes,3,opt,name=blkio_stats" json:"blkio_stats,omitempty"` + HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + PidsStats *PidsStats `protobuf:"bytes,5,opt,name=pids_stats" json:"pids_stats,omitempty"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (m *CgroupStats) String() string { return proto.CompactTextString(m) } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *CgroupStats) GetCpuStats() *CpuStats { + if m != nil { + return m.CpuStats + } + return nil +} + +func (m *CgroupStats) GetMemoryStats() *MemoryStats { + if m != nil { + return m.MemoryStats + } + return nil +} + +func (m *CgroupStats) GetBlkioStats() *BlkioStats { + if m != nil { + return m.BlkioStats + } + return nil +} + +func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats { + if m != nil { + return m.HugetlbStats + } + return nil +} + +func (m *CgroupStats) GetPidsStats() *PidsStats { + if m != nil { + return m.PidsStats + } + return nil +} + +type StatsResponse struct { + NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats" json:"network_stats,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats" json:"cgroup_stats,omitempty"` + Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *StatsResponse) Reset() { *m = StatsResponse{} } +func (m *StatsResponse) String() string { return proto.CompactTextString(m) } +func (*StatsResponse) ProtoMessage() {} +func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *StatsResponse) GetNetworkStats() []*NetworkStats { + if m != nil { + return m.NetworkStats + } + return nil +} + +func (m *StatsResponse) GetCgroupStats() *CgroupStats { + if m != nil { + return m.CgroupStats + } + return nil +} + +type StatsRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func init() { + proto.RegisterType((*GetServerVersionRequest)(nil), "types.GetServerVersionRequest") + proto.RegisterType((*GetServerVersionResponse)(nil), "types.GetServerVersionResponse") + proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest") + proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse") + proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest") + proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse") + proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest") + proto.RegisterType((*SignalResponse)(nil), "types.SignalResponse") + proto.RegisterType((*AddProcessRequest)(nil), "types.AddProcessRequest") + proto.RegisterType((*Rlimit)(nil), "types.Rlimit") + proto.RegisterType((*User)(nil), "types.User") + proto.RegisterType((*AddProcessResponse)(nil), "types.AddProcessResponse") + proto.RegisterType((*CreateCheckpointRequest)(nil), "types.CreateCheckpointRequest") + 
proto.RegisterType((*CreateCheckpointResponse)(nil), "types.CreateCheckpointResponse") + proto.RegisterType((*DeleteCheckpointRequest)(nil), "types.DeleteCheckpointRequest") + proto.RegisterType((*DeleteCheckpointResponse)(nil), "types.DeleteCheckpointResponse") + proto.RegisterType((*ListCheckpointRequest)(nil), "types.ListCheckpointRequest") + proto.RegisterType((*Checkpoint)(nil), "types.Checkpoint") + proto.RegisterType((*ListCheckpointResponse)(nil), "types.ListCheckpointResponse") + proto.RegisterType((*StateRequest)(nil), "types.StateRequest") + proto.RegisterType((*ContainerState)(nil), "types.ContainerState") + proto.RegisterType((*Process)(nil), "types.Process") + proto.RegisterType((*Container)(nil), "types.Container") + proto.RegisterType((*Machine)(nil), "types.Machine") + proto.RegisterType((*StateResponse)(nil), "types.StateResponse") + proto.RegisterType((*UpdateContainerRequest)(nil), "types.UpdateContainerRequest") + proto.RegisterType((*UpdateResource)(nil), "types.UpdateResource") + proto.RegisterType((*UpdateContainerResponse)(nil), "types.UpdateContainerResponse") + proto.RegisterType((*EventsRequest)(nil), "types.EventsRequest") + proto.RegisterType((*Event)(nil), "types.Event") + proto.RegisterType((*NetworkStats)(nil), "types.NetworkStats") + proto.RegisterType((*CpuUsage)(nil), "types.CpuUsage") + proto.RegisterType((*ThrottlingData)(nil), "types.ThrottlingData") + proto.RegisterType((*CpuStats)(nil), "types.CpuStats") + proto.RegisterType((*PidsStats)(nil), "types.PidsStats") + proto.RegisterType((*MemoryData)(nil), "types.MemoryData") + proto.RegisterType((*MemoryStats)(nil), "types.MemoryStats") + proto.RegisterType((*BlkioStatsEntry)(nil), "types.BlkioStatsEntry") + proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats") + proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats") + proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats") + proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse") + proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
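+// Compiling against a grpc-go release that predates
+// SupportPackageIsVersion2 makes the reference below fail to build,
+// surfacing the version skew at compile time rather than at runtime.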
+const _ = grpc.SupportPackageIsVersion2 + +// Client API for API service + +type APIClient interface { + GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) + CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) + UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) + Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) + UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) + AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) + CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) + DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) + ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) + State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) + Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) + Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) +} + +type aPIClient struct { + cc *grpc.ClientConn +} + +func NewAPIClient(cc *grpc.ClientConn) APIClient { + return &aPIClient{cc} +} + +func (c *aPIClient) GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) { + out := new(GetServerVersionResponse) + err := grpc.Invoke(ctx, "/types.API/GetServerVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { + out := new(CreateContainerResponse) + err := grpc.Invoke(ctx, "/types.API/CreateContainer", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { + out := new(UpdateContainerResponse) + err := grpc.Invoke(ctx, "/types.API/UpdateContainer", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) { + out := new(SignalResponse) + err := grpc.Invoke(ctx, "/types.API/Signal", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) { + out := new(UpdateProcessResponse) + err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) { + out := new(AddProcessResponse) + err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) { + out := new(CreateCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/CreateCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) { + out := new(DeleteCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/DeleteCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) { + out := new(ListCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/ListCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) { + out := new(StateResponse) + err := grpc.Invoke(ctx, "/types.API/State", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/types.API/Events", opts...) + if err != nil { + return nil, err + } + x := &aPIEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type API_EventsClient interface { + Recv() (*Event, error) + grpc.ClientStream +} + +type aPIEventsClient struct { + grpc.ClientStream +} + +func (x *aPIEventsClient) Recv() (*Event, error) { + m := new(Event) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) { + out := new(StatsResponse) + err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...) 
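Events is the only server-streaming method in the service: the generated API_EventsClient wraps grpc.ClientStream and delivers one Event per Recv call until the stream ends. A hedged consumer sketch, assuming a types.APIClient obtained as in the previous example plus the io, context, and types imports:

// watchEvents subscribes from a start timestamp and hands each event to
// fn; Recv returns io.EOF when the server closes the stream cleanly.
func watchEvents(ctx context.Context, client types.APIClient, from uint64, fn func(*types.Event)) error {
	stream, err := client.Events(ctx, &types.EventsRequest{Timestamp: from})
	if err != nil {
		return err
	}
	for {
		ev, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fn(ev)
	}
}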
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for API service + +type APIServer interface { + GetServerVersion(context.Context, *GetServerVersionRequest) (*GetServerVersionResponse, error) + CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + Signal(context.Context, *SignalRequest) (*SignalResponse, error) + UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error) + AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error) + CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error) + DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error) + ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error) + State(context.Context, *StateRequest) (*StateResponse, error) + Events(*EventsRequest, API_EventsServer) error + Stats(context.Context, *StatsRequest) (*StatsResponse, error) +} + +func RegisterAPIServer(s *grpc.Server, srv APIServer) { + s.RegisterService(&_API_serviceDesc, srv) +} + +func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServerVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).GetServerVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/GetServerVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).UpdateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Signal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Signal", + } + 
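On the server side the pattern inverts: any value satisfying APIServer can be attached to a grpc.Server via RegisterAPIServer, which installs the service descriptor's handler table. A minimal sketch; `daemon` stands in for a real implementation and the listen path is an assumption:

// serve exposes an APIServer implementation on a unix socket.
func serve(daemon types.APIServer) error {
	l, err := net.Listen("unix", "/run/containerd/containerd.sock")
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	types.RegisterAPIServer(s, daemon)
	return s.Serve(l)
}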
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Signal(ctx, req.(*SignalRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).UpdateProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).AddProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/AddProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).DeleteCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/DeleteCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).ListCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/ListCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(StateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).State(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/State", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).State(ctx, req.(*StateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(APIServer).Events(m, &aPIEventsServer{stream}) +} + +type API_EventsServer interface { + Send(*Event) error + grpc.ServerStream +} + +type aPIEventsServer struct { + grpc.ServerStream +} + +func (x *aPIEventsServer) Send(m *Event) error { + return x.ServerStream.SendMsg(m) +} + +func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Stats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Stats(ctx, req.(*StatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _API_serviceDesc = grpc.ServiceDesc{ + ServiceName: "types.API", + HandlerType: (*APIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetServerVersion", + Handler: _API_GetServerVersion_Handler, + }, + { + MethodName: "CreateContainer", + Handler: _API_CreateContainer_Handler, + }, + { + MethodName: "UpdateContainer", + Handler: _API_UpdateContainer_Handler, + }, + { + MethodName: "Signal", + Handler: _API_Signal_Handler, + }, + { + MethodName: "UpdateProcess", + Handler: _API_UpdateProcess_Handler, + }, + { + MethodName: "AddProcess", + Handler: _API_AddProcess_Handler, + }, + { + MethodName: "CreateCheckpoint", + Handler: _API_CreateCheckpoint_Handler, + }, + { + MethodName: "DeleteCheckpoint", + Handler: _API_DeleteCheckpoint_Handler, + }, + { + MethodName: "ListCheckpoint", + Handler: _API_ListCheckpoint_Handler, + }, + { + MethodName: "State", + Handler: _API_State_Handler, + }, + { + MethodName: "Stats", + Handler: _API_Stats_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Events", + Handler: _API_Events_Handler, + ServerStreams: true, + }, + }, +} + +var fileDescriptor0 = []byte{ + // 1879 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0x5b, 0x8f, 0x23, 0x47, + 0x15, 0x5e, 0xdf, 0xed, 0xe3, 0xcb, 0xec, 0xf4, 0xce, 0xc5, 0xeb, 0x90, 0xcd, 0xd2, 0x09, 0x64, + 0x05, 0xd1, 0x28, 0x78, 0xb9, 0x84, 0x20, 0x21, 0xc2, 0x24, 0x4a, 0x40, 0xb3, 0x8b, 0x33, 0xb3, + 0x13, 0xc4, 0x93, 0xd5, 0xee, 0xae, 0xb5, 0x8b, 0x69, 0x77, 0x37, 0x55, 0xd5, 0x9e, 0x99, 0x1f, + 0xc1, 0x7f, 0xe0, 0x1d, 0x09, 0xf1, 0xc4, 0x0f, 0xe0, 0xb7, 0xf0, 0x84, 0xc4, 0x7f, 0xe0, 0xd4, + 0xad, 0xdd, 0xdd, 0xbe, 0x0c, 0x12, 0xe2, 0x81, 0x17, 0xcb, 0x55, 0x75, 0xea, 0x3b, 0xe7, 0x7c, + 0xe7, 0x52, 0x55, 0x0d, 0x1d, 0x2f, 0xa1, 0x67, 0x09, 0x8b, 0x45, 0xec, 0x34, 0xc4, 0x7d, 0x42, + 0xb8, 0xfb, 0x14, 0x4e, 0xbf, 0x24, 0xe2, 0x8a, 0xb0, 0x15, 0x61, 0xdf, 0x10, 0xc6, 0x69, 0x1c, + 0x5d, 0x92, 0x3f, 0xa4, 0x84, 0x0b, 0xf7, 0x77, 0x30, 0xdc, 0x5c, 0xe2, 0x49, 0x1c, 0x71, 0xe2, + 0xf4, 0xa1, 0xb1, 
0xf4, 0x7e, 0x1f, 0xb3, 0x61, 0xe5, 0x79, 0xe5, 0x45, 0x5f, 0x0d, 0x69, 0x84, + 0xc3, 0xaa, 0x1d, 0x26, 0x9e, 0xf0, 0x17, 0xc3, 0x9a, 0x1a, 0x3e, 0x86, 0x36, 0x23, 0x2b, 0x2a, + 0x01, 0x86, 0x75, 0x9c, 0xe9, 0xb8, 0x33, 0x38, 0xba, 0x4e, 0x02, 0x4f, 0x90, 0x09, 0x8b, 0x7d, + 0xc2, 0xb9, 0x51, 0xe9, 0x00, 0x54, 0x69, 0xa0, 0x30, 0x3b, 0x4e, 0x17, 0x6a, 0x09, 0x0e, 0xaa, + 0x6a, 0x80, 0x2b, 0x7e, 0x18, 0x73, 0x72, 0x25, 0x02, 0x1a, 0x29, 0xd8, 0xb6, 0xd4, 0x72, 0x4b, + 0x03, 0xb1, 0x50, 0x98, 0x7d, 0x67, 0x00, 0xcd, 0x05, 0xa1, 0xf3, 0x85, 0x18, 0x36, 0xe4, 0xd8, + 0x3d, 0x85, 0xe3, 0x92, 0x0e, 0x6d, 0xbb, 0xfb, 0xa7, 0x0a, 0x9c, 0x9c, 0x33, 0x82, 0x2b, 0xe7, + 0x71, 0x24, 0x3c, 0x1a, 0x11, 0xb6, 0x4d, 0x3f, 0x0e, 0x66, 0x69, 0x14, 0x84, 0x64, 0xe2, 0xa1, + 0x8e, 0xb5, 0x19, 0x0b, 0xe2, 0xdf, 0x24, 0x31, 0x8d, 0x84, 0x32, 0xa3, 0x23, 0xcd, 0xe0, 0xca, + 0x2a, 0xe5, 0x9a, 0x34, 0x03, 0x87, 0x71, 0xaa, 0xcd, 0xb0, 0x63, 0xc2, 0xd8, 0xb0, 0x69, 0xc7, + 0xa1, 0x37, 0x23, 0x21, 0x1f, 0xb6, 0x9e, 0xd7, 0x70, 0xfc, 0x04, 0xba, 0x51, 0x3c, 0xa1, 0xab, + 0x58, 0x5c, 0xc6, 0xb1, 0x18, 0xb6, 0xa5, 0x6b, 0xee, 0xcf, 0xe1, 0x74, 0xc3, 0x42, 0xc3, 0xfc, + 0xfb, 0xd0, 0xf1, 0xed, 0xa4, 0xb2, 0xb4, 0x3b, 0x7e, 0x7c, 0xa6, 0x62, 0x79, 0x96, 0x09, 0xbb, + 0x9f, 0x40, 0xff, 0x8a, 0xce, 0x23, 0x2f, 0x7c, 0x90, 0x58, 0x69, 0x9e, 0x92, 0xd4, 0xb1, 0x72, + 0x1f, 0xc3, 0xc0, 0xee, 0x34, 0x74, 0xfd, 0xa5, 0x0a, 0x87, 0x9f, 0x05, 0xc1, 0x9e, 0x48, 0x61, + 0x7c, 0x05, 0x61, 0x98, 0x00, 0x88, 0x52, 0x55, 0xa1, 0x79, 0x0a, 0xf5, 0x94, 0xa3, 0x7d, 0x35, + 0x65, 0x5f, 0xd7, 0xd8, 0x77, 0x8d, 0x53, 0x4e, 0x0f, 0xea, 0x1e, 0x9b, 0x73, 0x64, 0xab, 0xa6, + 0x6d, 0x21, 0xd1, 0x0a, 0xa9, 0x32, 0x03, 0xff, 0x36, 0x30, 0x3c, 0x19, 0x2b, 0x5b, 0x45, 0x8e, + 0xdb, 0x25, 0x8e, 0x3b, 0x25, 0x8e, 0x41, 0x8d, 0x8f, 0xa0, 0xe7, 0x7b, 0x89, 0x37, 0xa3, 0x21, + 0x15, 0x94, 0xf0, 0x61, 0x57, 0xc1, 0x9f, 0xc2, 0x81, 0x97, 0x24, 0x1e, 0x5b, 0xc6, 0x0c, 0x9d, + 0x79, 0x4b, 0x43, 0x32, 0xec, 0x59, 0x71, 0x4e, 0x42, 0x1a, 0xa5, 0x77, 0x17, 0x32, 0x32, 0xc3, + 0xbe, 0x9a, 0x45, 0xf1, 0x28, 0x7e, 0x4d, 0x6e, 0x27, 0x8c, 0xae, 0x50, 0x76, 0x8e, 0x38, 0x03, + 0xe5, 0xdc, 0x33, 0x68, 0xb1, 0x90, 0x2e, 0xa9, 0xe0, 0xc3, 0x03, 0x04, 0xee, 0x8e, 0xfb, 0xc6, + 0xbf, 0x4b, 0x35, 0xeb, 0x8e, 0xa1, 0xa9, 0xff, 0x49, 0x5f, 0xe5, 0x8a, 0xa1, 0x09, 0x47, 0x3c, + 0x7e, 0x2b, 0x14, 0x45, 0x75, 0x39, 0x5a, 0x78, 0x2c, 0x50, 0x14, 0xd5, 0x31, 0x60, 0x75, 0xc5, + 0x0e, 0x7a, 0x9d, 0x1a, 0x5e, 0xfb, 0x72, 0x30, 0x37, 0x81, 0xea, 0x3b, 0x27, 0x30, 0xf0, 0x82, + 0x00, 0xfd, 0x89, 0x91, 0xe6, 0x2f, 0x69, 0xc0, 0x71, 0x67, 0x0d, 0x03, 0x76, 0x04, 0x4e, 0x3e, + 0x3a, 0x26, 0x68, 0x17, 0x59, 0x02, 0x65, 0xe9, 0xba, 0x2d, 0x72, 0xdf, 0x29, 0xe4, 0x73, 0x55, + 0x45, 0xeb, 0xd0, 0x66, 0x53, 0xb6, 0xe0, 0x8e, 0x60, 0xb8, 0x89, 0x66, 0x34, 0xbd, 0x84, 0xd3, + 0xcf, 0x49, 0x48, 0x1e, 0xd2, 0x84, 0xee, 0x46, 0xde, 0x92, 0xe8, 0xac, 0x93, 0x80, 0x9b, 0x9b, + 0x0c, 0xe0, 0xfb, 0x70, 0x7c, 0x41, 0xb9, 0xd8, 0x0b, 0x87, 0xbd, 0x09, 0xd6, 0x02, 0x19, 0x78, + 0xa6, 0x8a, 0xdc, 0x51, 0x61, 0x52, 0x11, 0x49, 0x14, 0x7e, 0x62, 0x5a, 0x06, 0x16, 0x5b, 0x1a, + 0xd1, 0xbb, 0xab, 0xd8, 0xbf, 0x21, 0x82, 0xab, 0x8a, 0x55, 0x7d, 0x84, 0x2f, 0x48, 0x18, 0xaa, + 0x82, 0x6d, 0xbb, 0xbf, 0x80, 0x93, 0xb2, 0x7e, 0x53, 0x7a, 0xdf, 0x85, 0xee, 0x9a, 0x2d, 0x8e, + 0xda, 0x6a, 0xbb, 0xe8, 0xea, 0x5d, 0x09, 0x64, 0x6b, 0x9b, 0xe1, 0xcf, 0x61, 0x90, 0x95, 0xa9, + 0x12, 0xd2, 0xc9, 0xeb, 0x89, 0x94, 0x1b, 0x89, 0x3f, 0x57, 0xa1, 0x65, 0xc2, 0x69, 0x8b, 0xe0, + 0x7f, 0x58, 0x66, 0x87, 0xd0, 0xe1, 0xf7, 
0x5c, 0x90, 0xe5, 0xc4, 0x14, 0x5b, 0xff, 0xff, 0xab, + 0xd8, 0xfe, 0x58, 0x81, 0x4e, 0x46, 0xe8, 0x83, 0xfd, 0xfb, 0xdb, 0xd0, 0x49, 0x34, 0xb5, 0x44, + 0xd7, 0x4f, 0x77, 0x3c, 0x30, 0x78, 0x96, 0xf2, 0x75, 0x38, 0xea, 0xa5, 0x7e, 0xad, 0xd9, 0x43, + 0x62, 0x13, 0x59, 0x7d, 0x4d, 0x59, 0x7d, 0xce, 0x01, 0x9a, 0x97, 0x46, 0x82, 0x62, 0xf2, 0xa9, + 0x4e, 0xe5, 0x7e, 0x08, 0xad, 0x57, 0x9e, 0xbf, 0x40, 0x6b, 0xa4, 0xa4, 0x9f, 0x98, 0xb0, 0xaa, + 0xe3, 0x69, 0x49, 0x90, 0x8d, 0x7b, 0x5d, 0xff, 0xee, 0x37, 0xd8, 0xa2, 0x75, 0x92, 0x98, 0xec, + 0xfa, 0x00, 0x6b, 0xd1, 0x3a, 0x62, 0x93, 0x6b, 0xa3, 0xb3, 0x3b, 0xef, 0x41, 0x6b, 0xa9, 0xf1, + 0x4d, 0xb9, 0x5a, 0xfb, 0x8d, 0x56, 0xf7, 0x06, 0x4e, 0xf4, 0xb1, 0xb7, 0xf7, 0x70, 0xdb, 0x38, + 0x03, 0xb4, 0xcb, 0xfa, 0x44, 0x7b, 0x01, 0x1d, 0x46, 0x78, 0x9c, 0x32, 0x24, 0x44, 0xb1, 0xd0, + 0x1d, 0x1f, 0xdb, 0xdc, 0x52, 0xd0, 0x97, 0x66, 0xd5, 0xfd, 0x47, 0x05, 0x06, 0xc5, 0x29, 0x59, + 0x62, 0xb3, 0xf0, 0x86, 0xc6, 0xbf, 0xd5, 0x67, 0xb1, 0x76, 0x1e, 0xb3, 0x0c, 0xa9, 0xb8, 0xc2, + 0x86, 0x87, 0x88, 0xd5, 0xdc, 0xd4, 0x84, 0x30, 0x1a, 0x07, 0xeb, 0x7b, 0x02, 0x4e, 0x7d, 0x9d, + 0xc6, 0xc2, 0x33, 0x67, 0xba, 0x3c, 0x6f, 0x91, 0x42, 0x22, 0xce, 0x25, 0x91, 0x8d, 0xec, 0x0c, + 0x56, 0x73, 0xaf, 0xc8, 0x92, 0x9b, 0x2c, 0x46, 0xa5, 0x9a, 0xdc, 0x0b, 0x99, 0x14, 0x26, 0x8f, + 0x51, 0x50, 0x4f, 0x5e, 0xdd, 0x7a, 0x89, 0x4a, 0xe6, 0x3e, 0x56, 0xcc, 0xa1, 0x9e, 0x43, 0x7b, + 0xf1, 0x62, 0xe3, 0xc9, 0x76, 0xaa, 0xf2, 0x5a, 0x2d, 0xdd, 0x10, 0x16, 0x91, 0xf0, 0x55, 0x0e, + 0x09, 0xd4, 0xa1, 0x88, 0x97, 0xa4, 0x0d, 0x4e, 0x4d, 0xb7, 0x72, 0xa1, 0xff, 0xc5, 0x8a, 0x60, + 0x3b, 0xb0, 0x2c, 0xa3, 0x5f, 0x32, 0x1d, 0x90, 0xd0, 0x65, 0xa2, 0xbc, 0xaf, 0xbb, 0x5f, 0x43, + 0x43, 0xc9, 0x94, 0xce, 0x03, 0x1d, 0x8f, 0x6d, 0x21, 0xe8, 0xdb, 0xf8, 0xd4, 0x6d, 0x8d, 0xae, + 0x21, 0x1b, 0x0a, 0xf2, 0x6f, 0x15, 0xe8, 0xbd, 0x26, 0xe2, 0x36, 0x66, 0x37, 0x32, 0x8b, 0x78, + 0xa9, 0x05, 0xca, 0x1b, 0xd7, 0xdd, 0x74, 0x76, 0x2f, 0x0c, 0xdd, 0x75, 0x49, 0x06, 0xce, 0x4c, + 0x3c, 0xdd, 0xf8, 0xd4, 0xa1, 0x23, 0x71, 0x2f, 0xef, 0xa6, 0x58, 0xc9, 0x31, 0xd3, 0x71, 0x56, + 0x62, 0x38, 0x15, 0xb0, 0x38, 0x49, 0x48, 0xa0, 0x75, 0x49, 0xb0, 0x37, 0x16, 0xac, 0x69, 0xa5, + 0x70, 0x26, 0x31, 0x60, 0x2d, 0x0b, 0xf6, 0x26, 0x03, 0x6b, 0xe7, 0xc4, 0x2c, 0x58, 0x47, 0x19, + 0xbe, 0x84, 0x36, 0xc6, 0xf2, 0x9a, 0x7b, 0x73, 0x95, 0x2a, 0x02, 0x63, 0x1d, 0x4e, 0x53, 0x39, + 0xd4, 0x64, 0xc9, 0xfe, 0x90, 0x10, 0x86, 0x11, 0x36, 0xb3, 0x55, 0x2c, 0x84, 0xba, 0xf3, 0x0e, + 0x3c, 0x51, 0xc3, 0x29, 0x8d, 0xa6, 0x3a, 0x4a, 0xcb, 0x38, 0x20, 0xc6, 0x0f, 0x8c, 0x5c, 0xb6, + 0x28, 0xfb, 0xa1, 0x5a, 0x52, 0xfe, 0xb8, 0x6f, 0x60, 0xf0, 0x66, 0x81, 0xf7, 0x5d, 0x81, 0x1d, + 0x67, 0xfe, 0xb9, 0x27, 0x3c, 0x59, 0xb1, 0x89, 0x4a, 0x3a, 0x6e, 0x14, 0xe2, 0x6e, 0xa1, 0x45, + 0x48, 0x30, 0xb5, 0x4b, 0x9a, 0x34, 0x3c, 0x73, 0xd7, 0x4b, 0xaa, 0xc8, 0xf5, 0x69, 0x2d, 0x94, + 0x13, 0x9a, 0x78, 0x57, 0xe5, 0x71, 0xce, 0x85, 0xee, 0xf8, 0xc0, 0x56, 0xad, 0x75, 0xf4, 0x0c, + 0x0e, 0x44, 0x66, 0xc5, 0x14, 0x13, 0xc9, 0x33, 0xc5, 0x6b, 0xcb, 0xaa, 0x64, 0xa3, 0xec, 0x91, + 0xaa, 0x29, 0x1b, 0x58, 0xad, 0xf5, 0xfb, 0xd0, 0xc1, 0x26, 0xcd, 0xb5, 0x5a, 0x74, 0xc3, 0x4f, + 0x19, 0xc3, 0xac, 0x32, 0x6e, 0x60, 0xd7, 0x56, 0x1d, 0xd1, 0xb4, 0x97, 0xd7, 0x00, 0x3a, 0x8f, + 0x15, 0x20, 0x2e, 0xe6, 0x39, 0xc6, 0x58, 0x2d, 0xbd, 0xbb, 0x8c, 0x60, 0x39, 0x85, 0x78, 0x6f, + 0x3d, 0x1a, 0xfa, 0xe6, 0x5a, 0x9b, 0xc3, 0xd3, 0x44, 0xfe, 0xb3, 0x02, 0x5d, 0x0d, 0xa8, 0xf5, + 0xe3, 0xb2, 0x8f, 0x1d, 0xc7, 0x22, 0x3e, 0xb7, 0x0a, 0x8a, 0x77, 
0x88, 0x9c, 0x09, 0x78, 0xd5, + 0xe0, 0x58, 0x87, 0x39, 0x8f, 0xb6, 0x8a, 0x7d, 0x08, 0x3d, 0x1d, 0x5f, 0x23, 0x58, 0xdf, 0x25, + 0xf8, 0x91, 0x3c, 0xa5, 0xd0, 0x12, 0xd5, 0x96, 0xbb, 0xe3, 0x77, 0x0b, 0x12, 0xca, 0xc6, 0x33, + 0xf5, 0xfb, 0x45, 0x24, 0xd8, 0xfd, 0xe8, 0x23, 0x80, 0xf5, 0x48, 0x56, 0xd7, 0x0d, 0xb9, 0x37, + 0xb5, 0x82, 0x9e, 0xac, 0xbc, 0x30, 0x35, 0x44, 0x7c, 0x5a, 0xfd, 0xa4, 0xe2, 0xfe, 0x1a, 0x0e, + 0x7e, 0x29, 0x7b, 0x58, 0x6e, 0x4b, 0xe1, 0xc1, 0x53, 0x2f, 0x3e, 0x78, 0xea, 0xb2, 0x94, 0xe3, + 0x64, 0xfd, 0x1e, 0xd0, 0x78, 0x9a, 0xb8, 0xbf, 0xd7, 0x00, 0xd6, 0x60, 0xce, 0xa7, 0x30, 0xa2, + 0xf1, 0x54, 0xf6, 0x1e, 0xea, 0x13, 0x5d, 0x54, 0x53, 0x46, 0x30, 0x94, 0x9c, 0xae, 0x88, 0xe9, + 0xfa, 0x27, 0xc6, 0x97, 0xb2, 0x0d, 0x3f, 0x82, 0xe3, 0xf5, 0xde, 0x20, 0xb7, 0xad, 0xba, 0x77, + 0xdb, 0x4b, 0x78, 0x82, 0xdb, 0xb0, 0x3b, 0xa5, 0x85, 0x4d, 0xb5, 0xbd, 0x9b, 0x7e, 0x0a, 0x4f, + 0x73, 0x76, 0xca, 0xdc, 0xcf, 0x6d, 0xad, 0xef, 0xdd, 0xfa, 0x63, 0x38, 0xc1, 0xad, 0xb7, 0x1e, + 0x15, 0xe5, 0x7d, 0x8d, 0xff, 0xc0, 0xce, 0x25, 0x61, 0xf3, 0x82, 0x9d, 0xcd, 0xbd, 0x9b, 0x7e, + 0x00, 0x87, 0xb8, 0xa9, 0xa4, 0xa7, 0xf5, 0xd0, 0x16, 0x4e, 0x7c, 0x81, 0x7d, 0x2a, 0xb7, 0xa5, + 0xbd, 0x6f, 0x8b, 0x3b, 0x81, 0xde, 0x57, 0xe9, 0x9c, 0x88, 0x70, 0x96, 0x65, 0xff, 0x7f, 0x59, + 0x4f, 0x7f, 0xad, 0x42, 0xf7, 0x7c, 0xce, 0xe2, 0x34, 0x29, 0xb4, 0x11, 0x9d, 0xd2, 0x1b, 0x6d, + 0x44, 0xcb, 0xbc, 0x80, 0x9e, 0x3e, 0xbc, 0x8c, 0x98, 0xae, 0x35, 0x67, 0x33, 0xf3, 0xe5, 0x4d, + 0x55, 0x1d, 0xc2, 0x46, 0xb0, 0x58, 0x6d, 0xb9, 0x6c, 0xfc, 0x19, 0xf4, 0x17, 0xda, 0x2f, 0x23, + 0xa9, 0x23, 0xfb, 0x81, 0xd5, 0xbc, 0x36, 0xf0, 0x2c, 0xef, 0xbf, 0xe6, 0x11, 0x2f, 0x2c, 0xf2, + 0x26, 0x34, 0xb5, 0x65, 0x98, 0x7f, 0x8a, 0x66, 0x8d, 0x6a, 0xf4, 0x15, 0x1c, 0x6e, 0x6e, 0x2d, + 0x14, 0xa0, 0x9b, 0x2f, 0xc0, 0xee, 0xf8, 0x89, 0x81, 0xc8, 0xef, 0x52, 0x55, 0x79, 0xa7, 0x6f, + 0x4c, 0xd9, 0x23, 0xc7, 0xf9, 0x1e, 0xf4, 0x23, 0x7d, 0x06, 0x66, 0xbc, 0xd5, 0x72, 0x00, 0x85, + 0xf3, 0x11, 0xb9, 0xf3, 0x95, 0x37, 0x5b, 0xb9, 0xcb, 0x47, 0xa2, 0x70, 0xda, 0xea, 0xce, 0x6b, + 0x2e, 0xf4, 0xdb, 0x1e, 0xbf, 0xe3, 0x7f, 0x35, 0xa1, 0xf6, 0xd9, 0xe4, 0x57, 0xce, 0x35, 0x3c, + 0x2e, 0x7f, 0x2d, 0x71, 0x9e, 0x19, 0xf8, 0x1d, 0x5f, 0x58, 0x46, 0xef, 0xed, 0x5c, 0x37, 0xb7, + 0x8b, 0x47, 0xce, 0x25, 0x1c, 0x94, 0xbe, 0x04, 0x38, 0xb6, 0xd5, 0x6d, 0xff, 0x86, 0x31, 0x7a, + 0xb6, 0x6b, 0x39, 0x8f, 0x59, 0xba, 0xce, 0x64, 0x98, 0xdb, 0xaf, 0x8e, 0x19, 0xe6, 0xae, 0x5b, + 0xd0, 0x23, 0xe7, 0x27, 0xd0, 0xd4, 0xdf, 0x0d, 0x9c, 0x23, 0x23, 0x5b, 0xf8, 0x00, 0x31, 0x3a, + 0x2e, 0xcd, 0x66, 0x1b, 0x2f, 0xa0, 0x5f, 0xf8, 0x4c, 0xe3, 0xbc, 0x53, 0xd0, 0x55, 0xfc, 0xec, + 0x30, 0xfa, 0xd6, 0xf6, 0xc5, 0x0c, 0xed, 0x1c, 0x60, 0xfd, 0x1a, 0x76, 0x86, 0x46, 0x7a, 0xe3, + 0xf3, 0xc5, 0xe8, 0xe9, 0x96, 0x95, 0x0c, 0x04, 0x43, 0x59, 0x7e, 0xee, 0x3a, 0x25, 0x56, 0xcb, + 0x8f, 0xd3, 0x2c, 0x94, 0x3b, 0xdf, 0xc9, 0x0a, 0xb6, 0xfc, 0xe8, 0xcd, 0x60, 0x77, 0x3c, 0xa1, + 0x33, 0xd8, 0x9d, 0xaf, 0xe5, 0x47, 0xce, 0x6f, 0x60, 0x50, 0x7c, 0xaf, 0x3a, 0x96, 0xa4, 0xad, + 0xcf, 0xe8, 0xd1, 0xbb, 0x3b, 0x56, 0x33, 0xc0, 0x1f, 0x42, 0x43, 0xbf, 0x4c, 0x6d, 0x21, 0xe5, + 0x1f, 0xb3, 0xa3, 0xa3, 0xe2, 0x64, 0xb6, 0xeb, 0x63, 0x68, 0xea, 0x8b, 0x70, 0x96, 0x00, 0x85, + 0x7b, 0xf1, 0xa8, 0x97, 0x9f, 0x75, 0x1f, 0x7d, 0x5c, 0xb1, 0x7a, 0x78, 0x41, 0x0f, 0xdf, 0xa6, + 0x27, 0x17, 0x9c, 0x59, 0x53, 0x7d, 0xbe, 0x7c, 0xf9, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x65, + 0x85, 0xda, 0xbd, 0xcb, 0x14, 0x00, 0x00, +} diff --git 
a/vendor/github.com/docker/containerd/api/grpc/types/api.proto b/vendor/github.com/docker/containerd/api/grpc/types/api.proto
new file mode 100644
index 00000000..d6f56e10
--- /dev/null
+++ b/vendor/github.com/docker/containerd/api/grpc/types/api.proto
@@ -0,0 +1,305 @@
+syntax = "proto3";
+
+package types;
+
+service API {
+	rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {}
+	rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {}
+	rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {}
+	rpc Signal(SignalRequest) returns (SignalResponse) {}
+	rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {}
+	rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {}
+	rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {}
+	rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {}
+	rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {}
+	rpc State(StateRequest) returns (StateResponse) {}
+	rpc Events(EventsRequest) returns (stream Event) {}
+	rpc Stats(StatsRequest) returns (StatsResponse) {}
+}
+
+message GetServerVersionRequest {
+}
+
+message GetServerVersionResponse {
+	uint32 major = 1;
+	uint32 minor = 2;
+	uint32 patch = 3;
+	string revision = 4;
+}
+
+message UpdateProcessRequest {
+	string id = 1;
+	string pid = 2;
+	bool closeStdin = 3; // Close stdin of the container
+	uint32 width = 4;
+	uint32 height = 5;
+}
+
+message UpdateProcessResponse {
+}
+
+message CreateContainerRequest {
+	string id = 1; // ID of container
+	string bundlePath = 2; // path to OCI bundle
+	string checkpoint = 3; // checkpoint name if you want to create immediate checkpoint (optional)
+	string stdin = 4; // path to the file where stdin will be read (optional)
+	string stdout = 5; // path to file where stdout will be written (optional)
+	string stderr = 6; // path to file where stderr will be written (optional)
+	repeated string labels = 7;
+	bool noPivotRoot = 8;
+}
+
+message CreateContainerResponse {
+	Container container = 1;
+}
+
+message SignalRequest {
+	string id = 1; // ID of container
+	string pid = 2; // PID of process inside container
+	uint32 signal = 3; // Signal which will be sent; you can find values in "man 7 signal"
+}
+
+message SignalResponse {
+}
+
+message AddProcessRequest {
+	string id = 1; // ID of container
+	bool terminal = 2; // Use tty for container stdio
+	User user = 3; // User under which process will be run
+	repeated string args = 4; // Arguments for process, first is binary path itself
+	repeated string env = 5; // List of environment variables for process
+	string cwd = 6; // Working directory of process
+	string pid = 7; // Process ID
+	string stdin = 8; // path to the file where stdin will be read (optional)
+	string stdout = 9; // path to file where stdout will be written (optional)
+	string stderr = 10; // path to file where stderr will be written (optional)
+	repeated string capabilities = 11;
+	string apparmorProfile = 12;
+	string selinuxLabel = 13;
+	bool noNewPrivileges = 14;
+	repeated Rlimit rlimits = 15;
+}
+
+message Rlimit {
+	string type = 1;
+	uint64 soft = 2;
+	uint64 hard = 3;
+}
+
+message User {
+	uint32 uid = 1; // UID of user
+	uint32 gid = 2; // GID of user
+	repeated uint32 additionalGids = 3; // Additional groups to which user will be added
+}
+
+message AddProcessResponse {
+}
+
+message CreateCheckpointRequest {
+	string id = 1; // ID of container
+	Checkpoint checkpoint = 2; // Checkpoint configuration
+}
+
+message CreateCheckpointResponse {
+}
+
+message DeleteCheckpointRequest {
+	string id = 1; // ID of container
+	string name = 2; // Name of checkpoint
+}
+
+message DeleteCheckpointResponse {
+}
+
+message ListCheckpointRequest {
+	string id = 1; // ID of container
+}
+
+message Checkpoint {
+	string name = 1; // Name of checkpoint
+	bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not
+	bool tcp = 3; // allow open tcp connections
+	bool unixSockets = 4; // allow external unix sockets
+	bool shell = 5; // allow shell-jobs
+}
+
+message ListCheckpointResponse {
+	repeated Checkpoint checkpoints = 1; // List of checkpoints
+}
+
+message StateRequest {
+	string id = 1; // container id for a single container
+}
+
+message ContainerState {
+	string status = 1;
+}
+
+message Process {
+	string pid = 1;
+	bool terminal = 2; // Use tty for container stdio
+	User user = 3; // User under which process will be run
+	repeated string args = 4; // Arguments for process, first is binary path itself
+	repeated string env = 5; // List of environment variables for process
+	string cwd = 6; // Working directory of process
+	uint32 systemPid = 7;
+	string stdin = 8; // path to the file where stdin will be read (optional)
+	string stdout = 9; // path to file where stdout will be written (optional)
+	string stderr = 10; // path to file where stderr will be written (optional)
+	repeated string capabilities = 11;
+	string apparmorProfile = 12;
+	string selinuxLabel = 13;
+	bool noNewPrivileges = 14;
+	repeated Rlimit rlimits = 15;
+}
+
+message Container {
+	string id = 1; // ID of container
+	string bundlePath = 2; // Path to OCI bundle
+	repeated Process processes = 3; // List of processes which run in container
+	string status = 4; // Container status ("running", "paused", etc.)
+	repeated string labels = 5;
+	repeated uint32 pids = 6;
+	string runtime = 7; // runtime used to execute the container
+}
+
+// Machine is information about machine on which containerd is run
+message Machine {
+	uint32 cpus = 1; // number of cpus
+	uint64 memory = 2; // amount of memory
+}
+
+// StateResponse is information about containerd daemon
+message StateResponse {
+	repeated Container containers = 1;
+	Machine machine = 2;
+}
+
+message UpdateContainerRequest {
+	string id = 1; // ID of container
+	string pid = 2;
+	string status = 3; // Status to which containerd will try to change
+	UpdateResource resources = 4;
+}
+
+message UpdateResource {
+	uint32 blkioWeight = 1;
+	uint32 cpuShares = 2;
+	uint32 cpuPeriod = 3;
+	uint32 cpuQuota = 4;
+	string cpusetCpus = 5;
+	string cpusetMems = 6;
+	uint32 memoryLimit = 7;
+	uint32 memorySwap = 8;
+	uint32 memoryReservation = 9;
+	uint32 kernelMemoryLimit = 10;
+}
+
+message UpdateContainerResponse {
+}
+
+message EventsRequest {
+	uint64 timestamp = 1;
+}
+
+message Event {
+	string type = 1;
+	string id = 2;
+	uint32 status = 3;
+	string pid = 4;
+	uint64 timestamp = 5;
+}
+
+message NetworkStats {
+	string name = 1; // name of network interface
+	uint64 rx_bytes = 2;
+	uint64 rx_Packets = 3;
+	uint64 Rx_errors = 4;
+	uint64 Rx_dropped = 5;
+	uint64 Tx_bytes = 6;
+	uint64 Tx_packets = 7;
+	uint64 Tx_errors = 8;
+	uint64 Tx_dropped = 9;
+}
+
+message CpuUsage {
+	uint64 total_usage = 1;
+	repeated uint64 percpu_usage = 2;
+	uint64 usage_in_kernelmode = 3;
+	uint64 usage_in_usermode = 4;
+}
+
+message ThrottlingData {
+	uint64 periods = 1;
+	uint64 throttled_periods = 2;
+	uint64 throttled_time = 3;
+}
+
+message CpuStats {
+	CpuUsage cpu_usage = 1;
+	ThrottlingData throttling_data = 2;
+	uint64 system_usage = 3;
+}
+
+message PidsStats {
+	uint64 current = 1;
+	uint64 limit = 2;
+}
+
+message MemoryData {
+	uint64 usage = 1;
+	uint64 max_usage = 2;
+	uint64 failcnt = 3;
+	uint64 limit = 4;
+}
+
+message MemoryStats {
+	uint64 cache = 1;
+	MemoryData usage = 2;
+	MemoryData swap_usage = 3;
+	MemoryData kernel_usage = 4;
+	map<string, uint64> stats = 5;
+}
+
+message BlkioStatsEntry {
+	uint64 major = 1;
+	uint64 minor = 2;
+	string op = 3;
+	uint64 value = 4;
+}
+
+message BlkioStats {
+	repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device
+	repeated BlkioStatsEntry io_serviced_recursive = 2;
+	repeated BlkioStatsEntry io_queued_recursive = 3;
+	repeated BlkioStatsEntry io_service_time_recursive = 4;
+	repeated BlkioStatsEntry io_wait_time_recursive = 5;
+	repeated BlkioStatsEntry io_merged_recursive = 6;
+	repeated BlkioStatsEntry io_time_recursive = 7;
+	repeated BlkioStatsEntry sectors_recursive = 8;
+}
+
+message HugetlbStats {
+	uint64 usage = 1;
+	uint64 max_usage = 2;
+	uint64 failcnt = 3;
+	uint64 limit = 4;
+}
+
+message CgroupStats {
+	CpuStats cpu_stats = 1;
+	MemoryStats memory_stats = 2;
+	BlkioStats blkio_stats = 3;
+	map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage"
+	PidsStats pids_stats = 5;
+}
+
+message StatsResponse {
+	repeated NetworkStats network_stats = 1;
+	CgroupStats cgroup_stats = 2;
+	uint64 timestamp = 3;
+};
+
+message StatsRequest {
+	string id = 1;
+}
diff --git a/vendor/github.com/docker/containerd/archutils/epoll.go b/vendor/github.com/docker/containerd/archutils/epoll.go
new file mode 100644
index 00000000..c8ade640
--- /dev/null
+++ b/vendor/github.com/docker/containerd/archutils/epoll.go
@@ -0,0 +1,19 @@
+// +build linux,!arm64
+
+package archutils
+
+import (
+	"syscall"
+)
+
+func EpollCreate1(flag int) (int, error) {
+	return syscall.EpollCreate1(flag)
+}
+
+func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error {
+	return syscall.EpollCtl(epfd, op, fd, event)
+}
+
+func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) {
+	return syscall.EpollWait(epfd, events, msec)
+}
diff --git a/vendor/github.com/docker/containerd/archutils/epoll_arm64.go b/vendor/github.com/docker/containerd/archutils/epoll_arm64.go
new file mode 100644
index 00000000..00abc683
--- /dev/null
+++ b/vendor/github.com/docker/containerd/archutils/epoll_arm64.go
@@ -0,0 +1,70 @@
+// +build linux,arm64
+
+package archutils
+
+// #include <sys/epoll.h>
+/*
+int EpollCreate1(int flag) {
+	return epoll_create1(flag);
+}
+
+int EpollCtl(int efd, int op, int sfd, int events, int fd) {
+	struct epoll_event event;
+	event.events = events;
+	event.data.fd = fd;
+
+	return epoll_ctl(efd, op, sfd, &event);
+}
+
+struct event_t {
+	uint32_t events;
+	int fd;
+};
+
+struct epoll_event events[128];
+int run_epoll_wait(int fd, struct event_t *event) {
+	int n, i;
+	n = epoll_wait(fd, events, 128, -1);
+	for (i = 0; i < n; i++) {
+		event[i].events = events[i].events;
+		event[i].fd = events[i].data.fd;
+	}
+	return n;
+}
+*/
+import "C"
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+func EpollCreate1(flag int) (int, error) {
+	fd := int(C.EpollCreate1(C.int(flag)))
+	if fd < 0 {
+		return fd, fmt.Errorf("failed to create epoll, errno is %d", fd)
+	}
+	return fd, nil
+}
+
+func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error {
+	errno := C.EpollCtl(C.int(epfd), C.int(syscall.EPOLL_CTL_ADD), C.int(fd), C.int(event.Events), C.int(event.Fd))
+	if errno < 0 {
+		return fmt.Errorf("Failed to ctl epoll")
+	}
+	return nil
+}
+
+func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) {
+	var c_events [128]C.struct_event_t
+	n := int(C.run_epoll_wait(C.int(epfd), (*C.struct_event_t)(unsafe.Pointer(&c_events))))
+	if n < 0 {
+		return int(n), fmt.Errorf("Failed to wait epoll")
+	}
+	for i := 0; i < n; i++ {
+		events[i].Fd = int32(c_events[i].fd)
+		events[i].Events = uint32(c_events[i].events)
+	}
+	return int(n), nil
+}
diff --git a/vendor/github.com/docker/containerd/osutils/fds.go b/vendor/github.com/docker/containerd/osutils/fds.go
new file mode 100644
index 00000000..98fc9305
--- /dev/null
+++ b/vendor/github.com/docker/containerd/osutils/fds.go
@@ -0,0 +1,18 @@
+// +build !windows,!darwin
+
+package osutils
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+)
+
+// GetOpenFds returns the number of open fds for the process provided by pid
+func GetOpenFds(pid int) (int, error) {
+	dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd"))
+	if err != nil {
+		return -1, err
+	}
+	return len(dirs), nil
+}
diff --git a/vendor/github.com/docker/containerd/osutils/prctl.go b/vendor/github.com/docker/containerd/osutils/prctl.go
new file mode 100644
index 00000000..eeb3da1b
--- /dev/null
+++ b/vendor/github.com/docker/containerd/osutils/prctl.go
@@ -0,0 +1,43 @@
+// +build linux
+
+// http://man7.org/linux/man-pages/man2/prctl.2.html
+package osutils
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// If arg2 is nonzero, set the "child subreaper" attribute of the
+// calling process; if arg2 is zero, unset the attribute.
When a +// process is marked as a child subreaper, all of the children +// that it creates, and their descendants, will be marked as +// having a subreaper. In effect, a subreaper fulfills the role +// of init(1) for its descendant processes. Upon termination of +// a process that is orphaned (i.e., its immediate parent has +// already terminated) and marked as having a subreaper, the +// nearest still living ancestor subreaper will receive a SIGCHLD +// signal and be able to wait(2) on the process to discover its +// termination status. +const PR_SET_CHILD_SUBREAPER = 36 + +// Return the "child subreaper" setting of the caller, in the +// location pointed to by (int *) arg2. +const PR_GET_CHILD_SUBREAPER = 37 + +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + var i uintptr + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0); err != 0 { + return -1, err + } + return int(i), nil +} + +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, PR_SET_CHILD_SUBREAPER, uintptr(i), 0); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/docker/containerd/osutils/prctl_solaris.go b/vendor/github.com/docker/containerd/osutils/prctl_solaris.go new file mode 100644 index 00000000..84b2a777 --- /dev/null +++ b/vendor/github.com/docker/containerd/osutils/prctl_solaris.go @@ -0,0 +1,18 @@ +// +build solaris + +package osutils + +import ( + "errors" +) + +//Solaris TODO +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + return 0, errors.New("osutils GetSubreaper not implemented on Solaris") +} + +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + return errors.New("osutils SetSubreaper not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/osutils/reaper.go b/vendor/github.com/docker/containerd/osutils/reaper.go new file mode 100644 index 00000000..ab7d24d9 --- /dev/null +++ b/vendor/github.com/docker/containerd/osutils/reaper.go @@ -0,0 +1,47 @@ +// +build !windows + +package osutils + +import "syscall" + +// Exit is the wait4 information from an exited process +type Exit struct { + Pid int + Status int +} + +// Reap reaps all child processes for the calling process and returns their +// exit information +func Reap() (exits []Exit, err error) { + var ( + ws syscall.WaitStatus + rus syscall.Rusage + ) + for { + pid, err := syscall.Wait4(-1, &ws, syscall.WNOHANG, &rus) + if err != nil { + if err == syscall.ECHILD { + return exits, nil + } + return exits, err + } + if pid <= 0 { + return exits, nil + } + exits = append(exits, Exit{ + Pid: pid, + Status: exitStatus(ws), + }) + } +} + +const exitSignalOffset = 128 + +// exitStatus returns the correct exit status for a process based on if it +// was signaled or exited cleanly +func exitStatus(status syscall.WaitStatus) int { + if status.Signaled() { + return exitSignalOffset + int(status.Signal()) + } + return status.ExitStatus() +} diff --git a/vendor/github.com/docker/containerd/runtime/container.go b/vendor/github.com/docker/containerd/runtime/container.go new file mode 100644 index 00000000..b8bfce18 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container.go @@ -0,0 +1,581 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" 
+ "io/ioutil" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + ocs "github.com/opencontainers/runtime-spec/specs-go" +) + +type Container interface { + // ID returns the container ID + ID() string + // Path returns the path to the bundle + Path() string + // Start starts the init process of the container + Start(checkpoint string, s Stdio) (Process, error) + // Exec starts another process in an existing container + Exec(string, specs.ProcessSpec, Stdio) (Process, error) + // Delete removes the container's state and any resources + Delete() error + // Processes returns all the containers processes that have been added + Processes() ([]Process, error) + // State returns the containers runtime state + State() State + // Resume resumes a paused container + Resume() error + // Pause pauses a running container + Pause() error + // RemoveProcess removes the specified process from the container + RemoveProcess(string) error + // Checkpoints returns all the checkpoints for a container + Checkpoints() ([]Checkpoint, error) + // Checkpoint creates a new checkpoint + Checkpoint(Checkpoint) error + // DeleteCheckpoint deletes the checkpoint for the provided name + DeleteCheckpoint(name string) error + // Labels are user provided labels for the container + Labels() []string + // Pids returns all pids inside the container + Pids() ([]int, error) + // Stats returns realtime container stats and resource information + Stats() (*Stat, error) + // Name or path of the OCI compliant runtime used to execute the container + Runtime() string + // OOM signals the channel if the container received an OOM notification + OOM() (OOM, error) + // UpdateResource updates the containers resources to new values + UpdateResources(*Resource) error + + // Status return the current status of the container. 
+ Status() (State, error) +} + +type OOM interface { + io.Closer + FD() int + ContainerID() string + Flush() + Removed() bool +} + +type Stdio struct { + Stdin string + Stdout string + Stderr string +} + +func NewStdio(stdin, stdout, stderr string) Stdio { + for _, s := range []*string{ + &stdin, &stdout, &stderr, + } { + if *s == "" { + *s = "/dev/null" + } + } + return Stdio{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + } +} + +type ContainerOpts struct { + Root string + ID string + Bundle string + Runtime string + RuntimeArgs []string + Shim string + Labels []string + NoPivotRoot bool + Timeout time.Duration +} + +// New returns a new container +func New(opts ContainerOpts) (Container, error) { + c := &container{ + root: opts.Root, + id: opts.ID, + bundle: opts.Bundle, + labels: opts.Labels, + processes: make(map[string]Process), + runtime: opts.Runtime, + runtimeArgs: opts.RuntimeArgs, + shim: opts.Shim, + noPivotRoot: opts.NoPivotRoot, + timeout: opts.Timeout, + } + if err := os.Mkdir(filepath.Join(c.root, c.id), 0755); err != nil { + return nil, err + } + f, err := os.Create(filepath.Join(c.root, c.id, StateFile)) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(state{ + Bundle: c.bundle, + Labels: c.labels, + Runtime: c.runtime, + RuntimeArgs: c.runtimeArgs, + Shim: c.shim, + NoPivotRoot: opts.NoPivotRoot, + }); err != nil { + return nil, err + } + return c, nil +} + +func Load(root, id string, timeout time.Duration) (Container, error) { + var s state + f, err := os.Open(filepath.Join(root, id, StateFile)) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + c := &container{ + root: root, + id: id, + bundle: s.Bundle, + labels: s.Labels, + runtime: s.Runtime, + runtimeArgs: s.RuntimeArgs, + shim: s.Shim, + noPivotRoot: s.NoPivotRoot, + processes: make(map[string]Process), + timeout: timeout, + } + dirs, err := ioutil.ReadDir(filepath.Join(root, id)) + if err != nil { + return nil, err + } + for _, d := range dirs { + if !d.IsDir() { + continue + } + pid := d.Name() + s, err := readProcessState(filepath.Join(root, id, pid)) + if err != nil { + return nil, err + } + p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s) + if err != nil { + logrus.WithField("id", id).WithField("pid", pid).Debug("containerd: error loading process %s", err) + continue + } + c.processes[pid] = p + } + return c, nil +} + +func readProcessState(dir string) (*ProcessState, error) { + f, err := os.Open(filepath.Join(dir, "process.json")) + if err != nil { + return nil, err + } + defer f.Close() + var s ProcessState + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + return &s, nil +} + +type container struct { + // path to store runtime state information + root string + id string + bundle string + runtime string + runtimeArgs []string + shim string + processes map[string]Process + labels []string + oomFds []int + noPivotRoot bool + timeout time.Duration +} + +func (c *container) ID() string { + return c.id +} + +func (c *container) Path() string { + return c.bundle +} + +func (c *container) Labels() []string { + return c.labels +} + +func (c *container) readSpec() (*specs.Spec, error) { + var spec specs.Spec + f, err := os.Open(filepath.Join(c.bundle, "config.json")) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&spec); err != nil { + return nil, err + } + return &spec, nil +} + +func 
(c *container) Delete() error { + err := os.RemoveAll(filepath.Join(c.root, c.id)) + + args := c.runtimeArgs + args = append(args, "delete", c.id) + if derr := exec.Command(c.runtime, args...).Run(); err == nil { + err = derr + } + return err +} + +func (c *container) Processes() ([]Process, error) { + out := []Process{} + for _, p := range c.processes { + out = append(out, p) + } + return out, nil +} + +func (c *container) RemoveProcess(pid string) error { + delete(c.processes, pid) + return os.RemoveAll(filepath.Join(c.root, c.id, pid)) +} + +func (c *container) State() State { + proc := c.processes["init"] + if proc == nil { + return Stopped + } + return proc.State() +} + +func (c *container) Runtime() string { + return c.runtime +} + +func (c *container) Pause() error { + args := c.runtimeArgs + args = append(args, "pause", c.id) + b, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(b)) + } + return nil +} + +func (c *container) Resume() error { + args := c.runtimeArgs + args = append(args, "resume", c.id) + b, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(b)) + } + return nil +} + +func (c *container) Checkpoints() ([]Checkpoint, error) { + dirs, err := ioutil.ReadDir(filepath.Join(c.bundle, "checkpoints")) + if err != nil { + return nil, err + } + var out []Checkpoint + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(c.bundle, "checkpoints", d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + return out, nil +} + +func (c *container) Checkpoint(cpt Checkpoint) error { + if err := os.MkdirAll(filepath.Join(c.bundle, "checkpoints"), 0755); err != nil { + return err + } + path := filepath.Join(c.bundle, "checkpoints", cpt.Name) + if err := os.Mkdir(path, 0755); err != nil { + return err + } + f, err := os.Create(filepath.Join(path, "config.json")) + if err != nil { + return err + } + cpt.Created = time.Now() + err = json.NewEncoder(f).Encode(cpt) + f.Close() + if err != nil { + return err + } + args := []string{ + "checkpoint", + "--image-path", path, + } + add := func(flags ...string) { + args = append(args, flags...) + } + add(c.runtimeArgs...) 
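+	// Translate the Checkpoint options into runc's CRIU-backed flags:
+	// unless Exit was requested the container is left running, and the
+	// shell/tcp/unix-socket options opt in to CRIU's external resources.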
+ if !cpt.Exit { + add("--leave-running") + } + if cpt.Shell { + add("--shell-job") + } + if cpt.Tcp { + add("--tcp-established") + } + if cpt.UnixSockets { + add("--ext-unix-sk") + } + add(c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(out)) + } + return err +} + +func (c *container) DeleteCheckpoint(name string) error { + return os.RemoveAll(filepath.Join(c.bundle, "checkpoints", name)) +} + +func (c *container) Start(checkpoint string, s Stdio) (Process, error) { + processRoot := filepath.Join(c.root, c.id, InitProcessID) + if err := os.Mkdir(processRoot, 0755); err != nil { + return nil, err + } + spec, err := c.readSpec() + if err != nil { + return nil, err + } + config := &processConfig{ + checkpoint: checkpoint, + root: processRoot, + id: InitProcessID, + c: c, + stdio: s, + spec: spec, + processSpec: specs.ProcessSpec(spec.Process), + } + p, err := c.newProcess(config) + if err != nil { + return nil, err + } + if err := p.Start(); err != nil { + return nil, err + } + c.processes[InitProcessID] = p + return p, nil +} + +func (c *container) Exec(pid string, pspec specs.ProcessSpec, s Stdio) (pp Process, err error) { + processRoot := filepath.Join(c.root, c.id, pid) + if err := os.Mkdir(processRoot, 0755); err != nil { + return nil, err + } + defer func() { + if err != nil { + c.RemoveProcess(pid) + } + }() + spec, err := c.readSpec() + if err != nil { + return nil, err + } + config := &processConfig{ + exec: true, + id: pid, + root: processRoot, + c: c, + processSpec: pspec, + spec: spec, + stdio: s, + } + p, err := c.newProcess(config) + if err != nil { + return nil, err + } + if err := p.Start(); err != nil { + return nil, err + } + c.processes[pid] = p + return p, nil +} + +func hostIDFromMap(id uint32, mp []ocs.IDMapping) int { + for _, m := range mp { + if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) { + return int(m.HostID + (id - m.ContainerID)) + } + } + return 0 +} + +func (c *container) Pids() ([]int, error) { + args := c.runtimeArgs + args = append(args, "ps", "--format=json", c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("%s: %q", err.Error(), out) + } + var pids []int + if err := json.Unmarshal(out, &pids); err != nil { + return nil, err + } + return pids, nil +} + +func (c *container) Stats() (*Stat, error) { + now := time.Now() + args := c.runtimeArgs + args = append(args, "events", "--stats", c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("%s: %q", err.Error(), out) + } + s := struct { + Data *Stat `json:"data"` + }{} + if err := json.Unmarshal(out, &s); err != nil { + return nil, err + } + s.Data.Timestamp = now + return s.Data, nil +} + +// Status implements the runtime Container interface. +func (c *container) Status() (State, error) { + args := c.runtimeArgs + args = append(args, "state", c.id) + + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return "", fmt.Errorf("%s: %q", err.Error(), out) + } + + // We only require the runtime json output to have a top level Status field. 
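+	// Any other fields in the `runc state` output are ignored, which keeps
+	// this compatible with OCI runtimes whose state schemas differ.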
+ var s struct { + Status State `json:"status"` + } + if err := json.Unmarshal(out, &s); err != nil { + return "", err + } + return s.Status, nil +} + +func (c *container) writeEventFD(root string, cfd, efd int) error { + f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd)) + return err +} + +func (c *container) newProcess(config *processConfig) (Process, error) { + if c.shim == "" { + return newDirectProcess(config) + } + return newProcess(config) +} + +type waitArgs struct { + pid int + err error +} + +// isAlive checks if the shim that launched the container is still alive +func isAlive(cmd *exec.Cmd) (bool, error) { + if err := syscall.Kill(cmd.Process.Pid, 0); err != nil { + if err == syscall.ESRCH { + return false, nil + } + return false, err + } + return true, nil +} + +type oom struct { + id string + root string + control *os.File + eventfd int +} + +func (o *oom) ContainerID() string { + return o.id +} + +func (o *oom) FD() int { + return o.eventfd +} + +func (o *oom) Flush() { + buf := make([]byte, 8) + syscall.Read(o.eventfd, buf) +} + +func (o *oom) Removed() bool { + _, err := os.Lstat(filepath.Join(o.root, "cgroup.event_control")) + return os.IsNotExist(err) +} + +func (o *oom) Close() error { + err := syscall.Close(o.eventfd) + if cerr := o.control.Close(); err == nil { + err = cerr + } + return err +} + +type message struct { + Level string `json:"level"` + Msg string `json:"msg"` +} + +func readLogMessages(path string) ([]message, error) { + var out []message + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + dec := json.NewDecoder(f) + for { + var m message + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return nil, err + } + out = append(out, m) + } + return out, nil +} diff --git a/vendor/github.com/docker/containerd/runtime/container_linux.go b/vendor/github.com/docker/containerd/runtime/container_linux.go new file mode 100644 index 00000000..9e2e6d6b --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container_linux.go @@ -0,0 +1,134 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + "github.com/opencontainers/runc/libcontainer" + ocs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (c *container) getLibctContainer() (libcontainer.Container, error) { + runtimeRoot := "/run/runc" + + // Check that the root wasn't changed + for _, opt := range c.runtimeArgs { + if strings.HasPrefix(opt, "--root=") { + runtimeRoot = strings.TrimPrefix(opt, "--root=") + break + } + } + + f, err := libcontainer.New(runtimeRoot, libcontainer.Cgroupfs) + if err != nil { + return nil, err + } + return f.Load(c.id) +} + +func (c *container) OOM() (OOM, error) { + container, err := c.getLibctContainer() + if err != nil { + if lerr, ok := err.(libcontainer.Error); ok { + // with oom registration sometimes the container can run, exit, and be destroyed + // faster than we can get the state back so we can just ignore this + if lerr.Code() == libcontainer.ContainerNotExists { + return nil, ErrContainerExited + } + } + return nil, err + } + state, err := container.State() + if err != nil { + return nil, err + } + memoryPath := state.CgroupPaths["memory"] + return c.getMemeoryEventFD(memoryPath) +} + +func u64Ptr(i uint64) 
*uint64 { return &i } + +func (c *container) UpdateResources(r *Resource) error { + sr := ocs.Resources{ + Memory: &ocs.Memory{ + Limit: u64Ptr(uint64(r.Memory)), + Reservation: u64Ptr(uint64(r.MemoryReservation)), + Swap: u64Ptr(uint64(r.MemorySwap)), + Kernel: u64Ptr(uint64(r.KernelMemory)), + }, + CPU: &ocs.CPU{ + Shares: u64Ptr(uint64(r.CPUShares)), + Quota: u64Ptr(uint64(r.CPUQuota)), + Period: u64Ptr(uint64(r.CPUPeriod)), + Cpus: &r.CpusetCpus, + Mems: &r.CpusetMems, + }, + BlockIO: &ocs.BlockIO{ + Weight: &r.BlkioWeight, + }, + } + + srStr := bytes.NewBuffer(nil) + if err := json.NewEncoder(srStr).Encode(&sr); err != nil { + return err + } + + args := c.runtimeArgs + args = append(args, "update", "-r", "-", c.id) + cmd := exec.Command(c.runtime, args...) + cmd.Stdin = srStr + b, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf(string(b)) + } + return nil +} + +func getRootIDs(s *specs.Spec) (int, int, error) { + if s == nil { + return 0, 0, nil + } + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == ocs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func (c *container) getMemeoryEventFD(root string) (*oom, error) { + f, err := os.Open(filepath.Join(root, "memory.oom_control")) + if err != nil { + return nil, err + } + fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if serr != 0 { + f.Close() + return nil, serr + } + if err := c.writeEventFD(root, int(f.Fd()), int(fd)); err != nil { + syscall.Close(int(fd)) + f.Close() + return nil, err + } + return &oom{ + root: root, + id: c.id, + eventfd: int(fd), + control: f, + }, nil +} diff --git a/vendor/github.com/docker/containerd/runtime/container_solaris.go b/vendor/github.com/docker/containerd/runtime/container_solaris.go new file mode 100644 index 00000000..4d0d8b4a --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container_solaris.go @@ -0,0 +1,19 @@ +package runtime + +import ( + "errors" + + "github.com/docker/containerd/specs" +) + +func (c *container) OOM() (OOM, error) { + return nil, errors.New("runtime OOM() not implemented on Solaris") +} + +func (c *container) UpdateResources(r *Resource) error { + return errors.New("runtime UpdateResources() not implemented on Solaris") +} + +func getRootIDs(s *specs.Spec) (int, int, error) { + return 0, 0, errors.New("runtime getRootIDs() not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/runtime/direct_process.go b/vendor/github.com/docker/containerd/runtime/direct_process.go new file mode 100644 index 00000000..fe8ee894 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/direct_process.go @@ -0,0 +1,283 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path" + "path/filepath" + "sync" + "syscall" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper" + "github.com/docker/containerd/subreaper/exec" + "github.com/docker/docker/pkg/term" + "github.com/opencontainers/runc/libcontainer" +) + +type directProcess struct { + *process + sync.WaitGroup + + io stdio + console libcontainer.Console + consolePath string + exec bool + checkpoint string + specs *specs.Spec +} + +func newDirectProcess(config *processConfig) (*directProcess, error) { + lp, err := newProcess(config) + if err != nil { + return nil, err + } + + return &directProcess{ + 
specs:      config.spec,
+		process:    lp,
+		exec:       config.exec,
+		checkpoint: config.checkpoint,
+	}, nil
+}
+
+func (d *directProcess) CloseStdin() error {
+	if d.io.stdin != nil {
+		return d.io.stdin.Close()
+	}
+	return nil
+}
+
+func (d *directProcess) Resize(w, h int) error {
+	if d.console == nil {
+		return nil
+	}
+	ws := term.Winsize{
+		Width:  uint16(w),
+		Height: uint16(h),
+	}
+	return term.SetWinsize(d.console.Fd(), &ws)
+}
+
+func (d *directProcess) openIO() (*os.File, *os.File, *os.File, error) {
+	uid, gid, err := getRootIDs(d.specs)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	if d.spec.Terminal {
+		console, err := libcontainer.NewConsole(uid, gid)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		d.console = console
+		d.consolePath = console.Path()
+		stdin, err := os.OpenFile(d.stdio.Stdin, syscall.O_RDONLY, 0)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		go io.Copy(console, stdin)
+		stdout, err := os.OpenFile(d.stdio.Stdout, syscall.O_RDWR, 0)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		d.Add(1)
+		go func() {
+			io.Copy(stdout, console)
+			console.Close()
+			d.Done()
+		}()
+		d.io.stdin = stdin
+		d.io.stdout = stdout
+		d.io.stderr = stdout
+		return nil, nil, nil, nil
+	}
+
+	stdin, err := os.OpenFile(d.stdio.Stdin, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	stdout, err := os.OpenFile(d.stdio.Stdout, syscall.O_RDWR, 0)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	stderr, err := os.OpenFile(d.stdio.Stderr, syscall.O_RDWR, 0)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	d.io.stdin = stdin
+	d.io.stdout = stdout
+	d.io.stderr = stderr
+	return stdin, stdout, stderr, nil
+}
+
+func (d *directProcess) loadCheckpoint(bundle string) (*Checkpoint, error) {
+	if d.checkpoint == "" {
+		return nil, nil
+	}
+
+	f, err := os.Open(filepath.Join(bundle, "checkpoints", d.checkpoint, "config.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	var cpt Checkpoint
+	if err := json.NewDecoder(f).Decode(&cpt); err != nil {
+		return nil, err
+	}
+	return &cpt, nil
+}
+
+func (d *directProcess) Start() error {
+	cwd, err := filepath.Abs(d.root)
+	if err != nil {
+		return err
+	}
+
+	stdin, stdout, stderr, err := d.openIO()
+	if err != nil {
+		return err
+	}
+
+	checkpoint, err := d.loadCheckpoint(d.container.bundle)
+	if err != nil {
+		return err
+	}
+
+	logPath := filepath.Join(cwd, "log.json")
+	args := append([]string{
+		"--log", logPath,
+		"--log-format", "json",
+	}, d.container.runtimeArgs...)
+	if d.exec {
+		args = append(args, "exec",
+			"--process", filepath.Join(cwd, "process.json"),
+			"--console", d.consolePath,
+		)
+	} else if checkpoint != nil {
+		args = append(args, "restore",
+			"--image-path", filepath.Join(d.container.bundle, "checkpoints", checkpoint.Name),
+		)
+		add := func(flags ...string) {
+			args = append(args, flags...)
+		}
+		if checkpoint.Shell {
+			add("--shell-job")
+		}
+		if checkpoint.Tcp {
+			add("--tcp-established")
+		}
+		if checkpoint.UnixSockets {
+			add("--ext-unix-sk")
+		}
+		if d.container.noPivotRoot {
+			add("--no-pivot")
+		}
+	} else {
+		args = append(args, "start",
+			"--bundle", d.container.bundle,
+			"--console", d.consolePath,
+		)
+		if d.container.noPivotRoot {
+			args = append(args, "--no-pivot")
+		}
+	}
+	args = append(args,
+		"-d",
+		"--pid-file", filepath.Join(cwd, "pid"),
+		d.container.id,
+	)
+	cmd := exec.Command(d.container.runtime, args...)
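+	// The runtime is asked to detach (-d above) and to write the container's
+	// pid to a pid file, which waitForStart and watch poll to learn the
+	// actual container pid.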
+ cmd.Dir = d.container.bundle + cmd.Stdin = stdin + cmd.Stdout = stdout + cmd.Stderr = stderr + // set the parent death signal to SIGKILL so that if containerd dies the container + // process also dies + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGKILL, + } + + exitSubscription := subreaper.Subscribe() + err = d.startCmd(cmd) + if err != nil { + subreaper.Unsubscribe(exitSubscription) + d.delete() + return err + } + + go d.watch(cmd, exitSubscription) + + return nil +} + +func (d *directProcess) watch(cmd *exec.Cmd, exitSubscription *subreaper.Subscription) { + defer subreaper.Unsubscribe(exitSubscription) + defer d.delete() + + f, err := os.OpenFile(path.Join(d.root, ExitFile), syscall.O_WRONLY, 0) + if err == nil { + defer f.Close() + } + + exitCode := 0 + if err = cmd.Wait(); err != nil { + if exitError, ok := err.(exec.ExitCodeError); ok { + exitCode = exitError.Code + } + } + + if exitCode == 0 { + pid, err := d.getPidFromFile() + if err != nil { + return + } + exitSubscription.SetPid(pid) + exitCode = exitSubscription.Wait() + } + + writeInt(path.Join(d.root, ExitStatusFile), exitCode) +} + +func (d *directProcess) delete() { + if d.console != nil { + d.console.Close() + } + d.io.Close() + d.Wait() + if !d.exec { + exec.Command(d.container.runtime, append(d.container.runtimeArgs, "delete", d.container.id)...).Run() + } +} + +func writeInt(path string, i int) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + _, err = fmt.Fprintf(f, "%d", i) + return err +} + +type stdio struct { + stdin *os.File + stdout *os.File + stderr *os.File +} + +func (s stdio) Close() error { + err := s.stdin.Close() + if oerr := s.stdout.Close(); err == nil { + err = oerr + } + if oerr := s.stderr.Close(); err == nil { + err = oerr + } + return err +} diff --git a/vendor/github.com/docker/containerd/runtime/process.go b/vendor/github.com/docker/containerd/runtime/process.go new file mode 100644 index 00000000..0e8738d7 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/process.go @@ -0,0 +1,340 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + "golang.org/x/sys/unix" +) + +type Process interface { + io.Closer + + // ID of the process. 
+	// This is either "init" when it is the container's init process or
+	// it is a user-provided id for the process similar to the container id
+	ID() string
+	CloseStdin() error
+	Resize(int, int) error
+	// ExitFD returns the fd that provides an event when the process exits
+	ExitFD() int
+	// ExitStatus returns the exit status of the process or an error if it
+	// has not exited
+	ExitStatus() (int, error)
+	// Spec returns the process spec that created the process
+	Spec() specs.ProcessSpec
+	// Signal sends the provided signal to the process
+	Signal(os.Signal) error
+	// Container returns the container that the process belongs to
+	Container() Container
+	// Stdio of the container
+	Stdio() Stdio
+	// SystemPid is the pid on the system
+	SystemPid() int
+	// State returns if the process is running or not
+	State() State
+	// Start executes the process
+	Start() error
+}
+
+type processConfig struct {
+	id          string
+	root        string
+	processSpec specs.ProcessSpec
+	spec        *specs.Spec
+	c           *container
+	stdio       Stdio
+	exec        bool
+	checkpoint  string
+}
+
+func newProcess(config *processConfig) (*process, error) {
+	p := &process{
+		root:      config.root,
+		id:        config.id,
+		container: config.c,
+		spec:      config.processSpec,
+		stdio:     config.stdio,
+	}
+	uid, gid, err := getRootIDs(config.spec)
+	if err != nil {
+		return nil, err
+	}
+	f, err := os.Create(filepath.Join(config.root, "process.json"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	ps := ProcessState{
+		ProcessSpec: config.processSpec,
+		Exec:        config.exec,
+		PlatformProcessState: PlatformProcessState{
+			Checkpoint: config.checkpoint,
+			RootUID:    uid,
+			RootGID:    gid,
+		},
+		Stdin:       config.stdio.Stdin,
+		Stdout:      config.stdio.Stdout,
+		Stderr:      config.stdio.Stderr,
+		RuntimeArgs: config.c.runtimeArgs,
+		NoPivotRoot: config.c.noPivotRoot,
+	}
+
+	if err := json.NewEncoder(f).Encode(ps); err != nil {
+		return nil, err
+	}
+	exit, err := getExitPipe(filepath.Join(config.root, ExitFile))
+	if err != nil {
+		return nil, err
+	}
+	control, err := getControlPipe(filepath.Join(config.root, ControlFile))
+	if err != nil {
+		return nil, err
+	}
+	p.exitPipe = exit
+	p.controlPipe = control
+	return p, nil
+}
+
+func (p *process) Start() error {
+	cmd := exec.Command(p.container.shim,
+		p.container.id, p.container.bundle, p.container.runtime,
+	)
+	cmd.Dir = p.root
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Setpgid: true,
+	}
+
+	return p.startCmd(cmd)
+}
+
+func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) {
+	p := &process{
+		root:      root,
+		id:        id,
+		container: c,
+		spec:      s.ProcessSpec,
+		stdio: Stdio{
+			Stdin:  s.Stdin,
+			Stdout: s.Stdout,
+			Stderr: s.Stderr,
+		},
+	}
+	if _, err := p.getPidFromFile(); err != nil {
+		return nil, err
+	}
+	if _, err := p.ExitStatus(); err != nil {
+		if err == ErrProcessNotExited {
+			exit, err := getExitPipe(filepath.Join(root, ExitFile))
+			if err != nil {
+				return nil, err
+			}
+			p.exitPipe = exit
+			return p, nil
+		}
+		return nil, err
+	}
+	return p, nil
+}
+
+type process struct {
+	root        string
+	id          string
+	pid         int
+	exitPipe    *os.File
+	controlPipe *os.File
+	container   *container
+	spec        specs.ProcessSpec
+	stdio       Stdio
+}
+
+func (p *process) ID() string {
+	return p.id
+}
+
+func (p *process) Container() Container {
+	return p.container
+}
+
+func (p *process) SystemPid() int {
+	return p.pid
+}
+
+// ExitFD returns the fd of the exit pipe
+func (p *process) ExitFD() int {
+	return int(p.exitPipe.Fd())
+}
+
+func (p *process) CloseStdin() error {
+	_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0)
+	return err
+}
+
+func (p *process) Resize(w, h int) error {
+	_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h)
+	return err
+}
+
+func (p *process) ExitStatus() (int, error) {
+	data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return -1, ErrProcessNotExited
+		}
+		return -1, err
+	}
+	if len(data) == 0 {
+		return -1, ErrProcessNotExited
+	}
+	return strconv.Atoi(string(data))
+}
+
+func (p *process) Spec() specs.ProcessSpec {
+	return p.spec
+}
+
+func (p *process) Stdio() Stdio {
+	return p.stdio
+}
+
+// Close closes any open files and/or resources on the process
+func (p *process) Close() error {
+	return p.exitPipe.Close()
+}
+
+func (p *process) State() State {
+	if p.pid == 0 {
+		return Stopped
+	}
+	err := syscall.Kill(p.pid, 0)
+	if err != nil && err == syscall.ESRCH {
+		return Stopped
+	}
+	return Running
+}
+
+func (p *process) getPidFromFile() (int, error) {
+	data, err := ioutil.ReadFile(filepath.Join(p.root, "pid"))
+	if err != nil {
+		return -1, err
+	}
+	i, err := strconv.Atoi(string(data))
+	if err != nil {
+		return -1, errInvalidPidInt
+	}
+	p.pid = i
+	return i, nil
+}
+
+func getExitPipe(path string) (*os.File, error) {
+	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+	// add NONBLOCK in case the other side has already closed or else
+	// this function would never return
+	return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
+}
+
+func getControlPipe(path string) (*os.File, error) {
+	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+	return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0)
+}
+
+// Signal sends the provided signal to the process
+func (p *process) Signal(s os.Signal) error {
+	return syscall.Kill(p.pid, s.(syscall.Signal))
+}
+
+func (p *process) startCmd(cmd *exec.Cmd) error {
+	if err := cmd.Start(); err != nil {
+		if exErr, ok := err.(*exec.Error); ok {
+			if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist {
+				return fmt.Errorf("%s not installed on system", p.container.shim)
+			}
+		}
+		return err
+	}
+	if err := p.waitForStart(cmd); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (p *process) waitForStart(cmd *exec.Cmd) error {
+	wc := make(chan error, 1)
+	go func() {
+		for {
+			if _, err := p.getPidFromFile(); err != nil {
+				if os.IsNotExist(err) || err == errInvalidPidInt {
+					alive, err := isAlive(cmd)
+					if err != nil {
+						wc <- err
+						return
+					}
+					if !alive {
+						// runc could have failed to run the container so let's get the error
+						// out of the logs or the shim could have encountered an error
+						messages, err := readLogMessages(filepath.Join(p.root, "shim-log.json"))
+						if err != nil && !os.IsNotExist(err) {
+							wc <- err
+							return
+						}
+						for _, m := range messages {
+							if m.Level == "error" {
+								wc <- fmt.Errorf("shim error: %v", m.Msg)
+								return
+							}
+						}
+						// no errors reported back from shim, check for runc/runtime errors
+						messages, err = readLogMessages(filepath.Join(p.root, "log.json"))
+						if err != nil {
+							if os.IsNotExist(err) {
+								err = ErrContainerNotStarted
+							}
+							wc <- err
+							return
+						}
+						for _, m := range messages {
+							if m.Level == "error" {
+								wc <- fmt.Errorf("oci runtime error: %v", m.Msg)
+								return
+							}
+						}
+						wc <- ErrContainerNotStarted
+						return
+					}
+					time.Sleep(15 * time.Millisecond)
+					continue
+				}
+				wc <- err
+				return
+			}
+			// the pid file was read successfully
+			wc <- nil
+			return
+		}
+	}()
+	select {
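+	// Either the polling goroutine reports success or an error first, or the
+	// start deadline expires and the launched command is killed.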
+	case err := <-wc:
+		if err != nil {
+			return err
+		}
+		return nil
+	case <-time.After(p.container.timeout):
+		cmd.Process.Kill()
+		cmd.Wait()
+		return ErrContainerStartTimeout
+	}
+}
diff --git a/vendor/github.com/docker/containerd/runtime/runtime.go b/vendor/github.com/docker/containerd/runtime/runtime.go
new file mode 100644
index 00000000..ff2094d4
--- /dev/null
+++ b/vendor/github.com/docker/containerd/runtime/runtime.go
@@ -0,0 +1,99 @@
+package runtime
+
+import (
+	"errors"
+	"time"
+
+	"github.com/docker/containerd/specs"
+)
+
+var (
+	ErrNotChildProcess       = errors.New("containerd: not a child process for container")
+	ErrInvalidContainerType  = errors.New("containerd: invalid container type for runtime")
+	ErrCheckpointNotExists   = errors.New("containerd: checkpoint does not exist for container")
+	ErrCheckpointExists      = errors.New("containerd: checkpoint already exists")
+	ErrContainerExited       = errors.New("containerd: container has exited")
+	ErrTerminalsNotSupported = errors.New("containerd: terminals are not supported for runtime")
+	ErrProcessNotExited      = errors.New("containerd: process has not exited")
+	ErrProcessExited         = errors.New("containerd: process has exited")
+	ErrContainerNotStarted   = errors.New("containerd: container not started")
+	ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout")
+
+	errNoPidFile      = errors.New("containerd: no process pid file found")
+	errInvalidPidInt  = errors.New("containerd: process pid is invalid")
+	errNotImplemented = errors.New("containerd: not implemented")
+)
+
+const (
+	ExitFile       = "exit"
+	ExitStatusFile = "exitStatus"
+	StateFile      = "state.json"
+	ControlFile    = "control"
+	InitProcessID  = "init"
+)
+
+type Checkpoint struct {
+	// Timestamp is the time that the checkpoint happened
+	Created time.Time `json:"created"`
+	// Name is the name of the checkpoint
+	Name string `json:"name"`
+	// Tcp checkpoints open tcp connections
+	Tcp bool `json:"tcp"`
+	// UnixSockets persists unix sockets in the checkpoint
+	UnixSockets bool `json:"unixSockets"`
+	// Shell persists tty sessions in the checkpoint
+	Shell bool `json:"shell"`
+	// Exit exits the container after the checkpoint is finished
+	Exit bool `json:"exit"`
+}
+
+// PlatformProcessState contains platform-specific fields in the ProcessState structure
+type PlatformProcessState struct {
+	Checkpoint string `json:"checkpoint"`
+	RootUID    int    `json:"rootUID"`
+	RootGID    int    `json:"rootGID"`
+}
+type State string
+
+type Resource struct {
+	CPUShares         int64
+	BlkioWeight       uint16
+	CPUPeriod         int64
+	CPUQuota          int64
+	CpusetCpus        string
+	CpusetMems        string
+	KernelMemory      int64
+	Memory            int64
+	MemoryReservation int64
+	MemorySwap        int64
+}
+
+const (
+	Paused  = State("paused")
+	Stopped = State("stopped")
+	Running = State("running")
+)
+
+type state struct {
+	Bundle      string   `json:"bundle"`
+	Labels      []string `json:"labels"`
+	Stdin       string   `json:"stdin"`
+	Stdout      string   `json:"stdout"`
+	Stderr      string   `json:"stderr"`
+	Runtime     string   `json:"runtime"`
+	RuntimeArgs []string `json:"runtimeArgs"`
+	Shim        string   `json:"shim"`
+	NoPivotRoot bool     `json:"noPivotRoot"`
+}
+
+type ProcessState struct {
+	specs.ProcessSpec
+	Exec        bool     `json:"exec"`
+	Stdin       string   `json:"containerdStdin"`
+	Stdout      string   `json:"containerdStdout"`
+	Stderr      string   `json:"containerdStderr"`
+	RuntimeArgs []string `json:"runtimeArgs"`
+	NoPivotRoot bool     `json:"noPivotRoot"`
+
+	PlatformProcessState
+}
diff --git a/vendor/github.com/docker/containerd/runtime/stats.go b/vendor/github.com/docker/containerd/runtime/stats.go
new file mode 100644
index 00000000..de160bf6
--- /dev/null
+++ b/vendor/github.com/docker/containerd/runtime/stats.go
@@ -0,0 +1,77 @@
+package runtime
+
+import "time"
+
+type Stat struct {
+	// Timestamp is the time that the statistics were collected
+	Timestamp time.Time
+	Cpu       Cpu                `json:"cpu"`
+	Memory    Memory             `json:"memory"`
+	Pids      Pids               `json:"pids"`
+	Blkio     Blkio              `json:"blkio"`
+	Hugetlb   map[string]Hugetlb `json:"hugetlb"`
+}
+
+type Hugetlb struct {
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type BlkioEntry struct {
+	Major uint64 `json:"major,omitempty"`
+	Minor uint64 `json:"minor,omitempty"`
+	Op    string `json:"op,omitempty"`
+	Value uint64 `json:"value,omitempty"`
+}
+
+type Blkio struct {
+	IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
+	IoServicedRecursive     []BlkioEntry `json:"ioServicedRecursive,omitempty"`
+	IoQueuedRecursive       []BlkioEntry `json:"ioQueueRecursive,omitempty"`
+	IoServiceTimeRecursive  []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
+	IoWaitTimeRecursive     []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
+	IoMergedRecursive       []BlkioEntry `json:"ioMergedRecursive,omitempty"`
+	IoTimeRecursive         []BlkioEntry `json:"ioTimeRecursive,omitempty"`
+	SectorsRecursive        []BlkioEntry `json:"sectorsRecursive,omitempty"`
+}
+
+type Pids struct {
+	Current uint64 `json:"current,omitempty"`
+	Limit   uint64 `json:"limit,omitempty"`
+}
+
+type Throttling struct {
+	Periods          uint64 `json:"periods,omitempty"`
+	ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
+	ThrottledTime    uint64 `json:"throttledTime,omitempty"`
+}
+
+type CpuUsage struct {
+	// Units: nanoseconds.
+	Total  uint64   `json:"total,omitempty"`
+	Percpu []uint64 `json:"percpu,omitempty"`
+	Kernel uint64   `json:"kernel"`
+	User   uint64   `json:"user"`
+}
+
+type Cpu struct {
+	Usage      CpuUsage   `json:"usage,omitempty"`
+	Throttling Throttling `json:"throttling,omitempty"`
+}
+
+type MemoryEntry struct {
+	Limit   uint64 `json:"limit"`
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type Memory struct {
+	Cache     uint64            `json:"cache,omitempty"`
+	Usage     MemoryEntry       `json:"usage,omitempty"`
+	Swap      MemoryEntry       `json:"swap,omitempty"`
+	Kernel    MemoryEntry       `json:"kernel,omitempty"`
+	KernelTCP MemoryEntry       `json:"kernelTCP,omitempty"`
+	Raw       map[string]uint64 `json:"raw,omitempty"`
+}
diff --git a/vendor/github.com/docker/containerd/specs/spec_linux.go b/vendor/github.com/docker/containerd/specs/spec_linux.go
new file mode 100644
index 00000000..205f1c81
--- /dev/null
+++ b/vendor/github.com/docker/containerd/specs/spec_linux.go
@@ -0,0 +1,9 @@
+package specs
+
+import ocs "github.com/opencontainers/runtime-spec/specs-go"
+
+type (
+	ProcessSpec ocs.Process
+	Spec        ocs.Spec
+	Rlimit      ocs.Rlimit
+)
diff --git a/vendor/github.com/docker/containerd/specs/spec_solaris.go b/vendor/github.com/docker/containerd/specs/spec_solaris.go
new file mode 100644
index 00000000..625d27f1
--- /dev/null
+++ b/vendor/github.com/docker/containerd/specs/spec_solaris.go
@@ -0,0 +1,8 @@
+package specs
+
+import ocs "github.com/opencontainers/specs/specs-go"
+
+type (
+	ProcessSpec ocs.Process
+	Spec        ocs.Spec
+)
diff --git a/vendor/github.com/docker/containerd/subreaper/exec/copy.go b/vendor/github.com/docker/containerd/subreaper/exec/copy.go
new file mode 100644
index 00000000..371aa998
--- /dev/null
+++
b/vendor/github.com/docker/containerd/subreaper/exec/copy.go @@ -0,0 +1,54 @@ +package exec + +import ( + "bytes" + "errors" +) + +// Below is largely a copy from go's os/exec/exec.go + +// Run starts the specified command and waits for it to complete. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the command fails to run or doesn't complete successfully, the +// error is of type *ExitError. Other error types may be +// returned for I/O problems. +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return translateError(err) + } + return translateError(c.Wait()) +} + +// Output runs the command and returns its standard output. +// Any returned error will usually be of type *ExitError. +// If c.Stderr was nil, Output populates ExitError.Stderr. +func (c *Cmd) Output() ([]byte, error) { + if c.Stdout != nil { + return nil, errors.New("exec: Stdout already set") + } + var stdout bytes.Buffer + c.Stdout = &stdout + + err := c.Run() + return stdout.Bytes(), err +} + +// CombinedOutput runs the command and returns its combined standard +// output and standard error. +func (c *Cmd) CombinedOutput() ([]byte, error) { + if c.Stdout != nil { + return nil, errors.New("exec: Stdout already set") + } + if c.Stderr != nil { + return nil, errors.New("exec: Stderr already set") + } + var b bytes.Buffer + c.Stdout = &b + c.Stderr = &b + err := c.Run() + return b.Bytes(), err +} diff --git a/vendor/github.com/docker/containerd/subreaper/exec/wrapper.go b/vendor/github.com/docker/containerd/subreaper/exec/wrapper.go new file mode 100644 index 00000000..2a6f2d64 --- /dev/null +++ b/vendor/github.com/docker/containerd/subreaper/exec/wrapper.go @@ -0,0 +1,81 @@ +package exec + +import ( + "fmt" + osExec "os/exec" + "strconv" + + "github.com/docker/containerd/subreaper" +) + +var ErrNotFound = osExec.ErrNotFound + +type Cmd struct { + osExec.Cmd + err error + sub *subreaper.Subscription +} + +type Error struct { + Name string + Err error +} + +func (e *Error) Error() string { + return "exec: " + strconv.Quote(e.Name) + ": " + e.Err.Error() +} + +type ExitCodeError struct { + Code int +} + +func (e ExitCodeError) Error() string { + return fmt.Sprintf("Non-zero exit code: %d", e.Code) +} + +func LookPath(file string) (string, error) { + v, err := osExec.LookPath(file) + return v, translateError(err) +} + +func Command(name string, args ...string) *Cmd { + return &Cmd{ + Cmd: *osExec.Command(name, args...), + } +} + +func (c *Cmd) Start() error { + c.sub = subreaper.Subscribe() + err := c.Cmd.Start() + if err != nil { + subreaper.Unsubscribe(c.sub) + c.sub = nil + c.err = translateError(err) + return c.err + } + + c.sub.SetPid(c.Cmd.Process.Pid) + return nil +} + +func (c *Cmd) Wait() error { + if c.sub == nil { + return c.err + } + exitCode := c.sub.Wait() + if exitCode == 0 { + return nil + } + return ExitCodeError{Code: exitCode} +} + +func translateError(err error) error { + switch v := err.(type) { + case *osExec.Error: + return &Error{ + Name: v.Name, + Err: v.Err, + } + } + return err +} diff --git a/vendor/github.com/docker/containerd/subreaper/reaper.go b/vendor/github.com/docker/containerd/subreaper/reaper.go new file mode 100644 index 00000000..f7932043 --- /dev/null +++ b/vendor/github.com/docker/containerd/subreaper/reaper.go @@ -0,0 +1,102 @@ +package subreaper + +import ( + "os" + "os/signal" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + 
"github.com/docker/containerd/osutils" +) + +var ( + subscriptions = map[int]*Subscription{} + subLock = sync.Mutex{} + counter = 0 + once = sync.Once{} +) + +type Subscription struct { + id int + exit osutils.Exit + c chan osutils.Exit + wg sync.WaitGroup +} + +func (s *Subscription) SetPid(pid int) { + go func() { + for exit := range s.c { + if exit.Pid == pid { + s.exit = exit + s.wg.Done() + Unsubscribe(s) + } + } + }() +} + +func (s *Subscription) Wait() int { + s.wg.Wait() + return s.exit.Status +} + +func Subscribe() *Subscription { + subLock.Lock() + defer subLock.Unlock() + + Start() + + counter++ + s := &Subscription{ + id: counter, + c: make(chan osutils.Exit, 1024), + } + s.wg.Add(1) + subscriptions[s.id] = s + return s +} + +func Unsubscribe(sub *Subscription) { + subLock.Lock() + defer subLock.Unlock() + + delete(subscriptions, sub.id) +} + +func Start() error { + var err error + once.Do(func() { + err = osutils.SetSubreaper(1) + if err != nil { + return + } + + s := make(chan os.Signal, 2048) + signal.Notify(s, syscall.SIGCHLD) + go childReaper(s) + }) + + return err +} + +func childReaper(s chan os.Signal) { + for range s { + exits, err := osutils.Reap() + if err == nil { + notify(exits) + } else { + logrus.WithField("error", err).Warn("containerd: reap child processes") + } + } +} + +func notify(exits []osutils.Exit) { + subLock.Lock() + for _, exit := range exits { + for _, sub := range subscriptions { + sub.c <- exit + } + } + subLock.Unlock() +} diff --git a/vendor/github.com/docker/containerd/supervisor/add_process.go b/vendor/github.com/docker/containerd/supervisor/add_process.go new file mode 100644 index 00000000..93678ddc --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/add_process.go @@ -0,0 +1,43 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" + "github.com/docker/containerd/specs" +) + +type AddProcessTask struct { + baseTask + ID string + PID string + Stdout string + Stderr string + Stdin string + ProcessSpec *specs.ProcessSpec + StartResponse chan StartResponse +} + +func (s *Supervisor) addProcess(t *AddProcessTask) error { + start := time.Now() + ci, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + process, err := ci.container.Exec(t.PID, *t.ProcessSpec, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) + if err != nil { + return err + } + if err := s.monitorProcess(process); err != nil { + return err + } + ExecProcessTimer.UpdateSince(start) + t.StartResponse <- StartResponse{} + s.notifySubscribers(Event{ + Timestamp: time.Now(), + Type: StateStartProcess, + PID: t.PID, + ID: t.ID, + }) + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/checkpoint.go b/vendor/github.com/docker/containerd/supervisor/checkpoint.go new file mode 100644 index 00000000..466a35a4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/checkpoint.go @@ -0,0 +1,33 @@ +// +build !windows + +package supervisor + +import "github.com/docker/containerd/runtime" + +type CreateCheckpointTask struct { + baseTask + ID string + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) createCheckpoint(t *CreateCheckpointTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + return i.container.Checkpoint(*t.Checkpoint) +} + +type DeleteCheckpointTask struct { + baseTask + ID string + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) deleteCheckpoint(t *DeleteCheckpointTask) error { + i, ok := s.containers[t.ID] + if !ok { + return 
ErrContainerNotFound + } + return i.container.DeleteCheckpoint(t.Checkpoint.Name) +} diff --git a/vendor/github.com/docker/containerd/supervisor/create.go b/vendor/github.com/docker/containerd/supervisor/create.go new file mode 100644 index 00000000..817ed379 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/create.go @@ -0,0 +1,57 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" +) + +type StartTask struct { + baseTask + ID string + BundlePath string + Stdout string + Stderr string + Stdin string + StartResponse chan StartResponse + Labels []string + NoPivotRoot bool + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) start(t *StartTask) error { + start := time.Now() + container, err := runtime.New(runtime.ContainerOpts{ + Root: s.stateDir, + ID: t.ID, + Bundle: t.BundlePath, + Runtime: s.runtime, + RuntimeArgs: s.runtimeArgs, + Shim: s.shim, + Labels: t.Labels, + NoPivotRoot: t.NoPivotRoot, + Timeout: s.timeout, + }) + if err != nil { + return err + } + s.containers[t.ID] = &containerInfo{ + container: container, + } + ContainersCounter.Inc(1) + task := &startTask{ + Err: t.ErrorCh(), + Container: container, + StartResponse: t.StartResponse, + Stdin: t.Stdin, + Stdout: t.Stdout, + Stderr: t.Stderr, + } + if t.Checkpoint != nil { + task.Checkpoint = t.Checkpoint.Name + } + + s.startTasks <- task + ContainerCreateTimer.UpdateSince(start) + return errDeferredResponse +} diff --git a/vendor/github.com/docker/containerd/supervisor/create_solaris.go b/vendor/github.com/docker/containerd/supervisor/create_solaris.go new file mode 100644 index 00000000..444e55bc --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/create_solaris.go @@ -0,0 +1,8 @@ +package supervisor + +type platformStartTask struct { +} + +// Checkpoint not supported on Solaris +func (task *startTask) setTaskCheckpoint(t *StartTask) { +} diff --git a/vendor/github.com/docker/containerd/supervisor/delete.go b/vendor/github.com/docker/containerd/supervisor/delete.go new file mode 100644 index 00000000..fd46f38b --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/delete.go @@ -0,0 +1,42 @@ +package supervisor + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +type DeleteTask struct { + baseTask + ID string + Status int + PID string + NoEvent bool +} + +func (s *Supervisor) delete(t *DeleteTask) error { + if i, ok := s.containers[t.ID]; ok { + start := time.Now() + if err := s.deleteContainer(i.container); err != nil { + logrus.WithField("error", err).Error("containerd: deleting container") + } + if !t.NoEvent { + s.notifySubscribers(Event{ + Type: StateExit, + Timestamp: time.Now(), + ID: t.ID, + Status: t.Status, + PID: t.PID, + }) + } + ContainersCounter.Dec(1) + ContainerDeleteTimer.UpdateSince(start) + } + return nil +} + +func (s *Supervisor) deleteContainer(container runtime.Container) error { + delete(s.containers, container.ID()) + return container.Delete() +} diff --git a/vendor/github.com/docker/containerd/supervisor/errors.go b/vendor/github.com/docker/containerd/supervisor/errors.go new file mode 100644 index 00000000..7e421d04 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/errors.go @@ -0,0 +1,24 @@ +package supervisor + +import "errors" + +var ( + // External errors + ErrTaskChanNil = errors.New("containerd: task channel is nil") + ErrBundleNotFound = errors.New("containerd: bundle not found") + ErrContainerNotFound = errors.New("containerd: container not 
found") + ErrContainerExists = errors.New("containerd: container already exists") + ErrProcessNotFound = errors.New("containerd: processs not found for container") + ErrUnknownContainerStatus = errors.New("containerd: unknown container status ") + ErrUnknownTask = errors.New("containerd: unknown task type") + + // Internal errors + errShutdown = errors.New("containerd: supervisor is shutdown") + errRootNotAbs = errors.New("containerd: rootfs path is not an absolute path") + errNoContainerForPid = errors.New("containerd: pid not registered for any container") + // internal error where the handler will defer to another for the final response + // + // TODO: we could probably do a typed error with another error channel for this to make it + // less like magic + errDeferredResponse = errors.New("containerd: deferred response") +) diff --git a/vendor/github.com/docker/containerd/supervisor/exit.go b/vendor/github.com/docker/containerd/supervisor/exit.go new file mode 100644 index 00000000..8f19eee6 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/exit.go @@ -0,0 +1,81 @@ +package supervisor + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +type ExitTask struct { + baseTask + Process runtime.Process +} + +func (s *Supervisor) exit(t *ExitTask) error { + start := time.Now() + proc := t.Process + status, err := proc.ExitStatus() + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + "pid": proc.ID(), + "id": proc.Container().ID(), + "systemPid": proc.SystemPid(), + }).Error("containerd: get exit status") + } + logrus.WithFields(logrus.Fields{ + "pid": proc.ID(), + "status": status, + "id": proc.Container().ID(), + "systemPid": proc.SystemPid(), + }).Debug("containerd: process exited") + + // if the process is the the init process of the container then + // fire a separate event for this process + if proc.ID() != runtime.InitProcessID { + ne := &ExecExitTask{ + ID: proc.Container().ID(), + PID: proc.ID(), + Status: status, + Process: proc, + } + s.SendTask(ne) + return nil + } + container := proc.Container() + ne := &DeleteTask{ + ID: container.ID(), + Status: status, + PID: proc.ID(), + } + s.SendTask(ne) + + ExitProcessTimer.UpdateSince(start) + + return nil +} + +type ExecExitTask struct { + baseTask + ID string + PID string + Status int + Process runtime.Process +} + +func (s *Supervisor) execExit(t *ExecExitTask) error { + container := t.Process.Container() + // exec process: we remove this process without notifying the main event loop + if err := container.RemoveProcess(t.PID); err != nil { + logrus.WithField("error", err).Error("containerd: find container for pid") + } + s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.ID, + Type: StateExit, + PID: t.PID, + Status: t.Status, + }) + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/get_containers.go b/vendor/github.com/docker/containerd/supervisor/get_containers.go new file mode 100644 index 00000000..23f49c78 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/get_containers.go @@ -0,0 +1,28 @@ +package supervisor + +import "github.com/docker/containerd/runtime" + +type GetContainersTask struct { + baseTask + ID string + Containers []runtime.Container +} + +func (s *Supervisor) getContainers(t *GetContainersTask) error { + + if t.ID != "" { + ci, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + t.Containers = append(t.Containers, ci.container) + + return nil + } + + for _, ci := range 
s.containers { + t.Containers = append(t.Containers, ci.container) + } + + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/machine.go b/vendor/github.com/docker/containerd/supervisor/machine.go new file mode 100644 index 00000000..7a21624b --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/machine.go @@ -0,0 +1,25 @@ +// +build !solaris + +package supervisor + +import "github.com/cloudfoundry/gosigar" + +type Machine struct { + Cpus int + Memory int64 +} + +func CollectMachineInformation() (Machine, error) { + m := Machine{} + cpu := sigar.CpuList{} + if err := cpu.Get(); err != nil { + return m, err + } + m.Cpus = len(cpu.List) + mem := sigar.Mem{} + if err := mem.Get(); err != nil { + return m, err + } + m.Memory = int64(mem.Total / 1024 / 1024) + return m, nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/machine_solaris.go b/vendor/github.com/docker/containerd/supervisor/machine_solaris.go new file mode 100644 index 00000000..c044705d --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/machine_solaris.go @@ -0,0 +1,15 @@ +package supervisor + +import ( + "errors" +) + +type Machine struct { + Cpus int + Memory int64 +} + +func CollectMachineInformation() (Machine, error) { + m := Machine{} + return m, errors.New("supervisor CollectMachineInformation not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/supervisor/metrics.go b/vendor/github.com/docker/containerd/supervisor/metrics.go new file mode 100644 index 00000000..2ba772a4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/metrics.go @@ -0,0 +1,31 @@ +package supervisor + +import "github.com/rcrowley/go-metrics" + +var ( + ContainerCreateTimer = metrics.NewTimer() + ContainerDeleteTimer = metrics.NewTimer() + ContainerStartTimer = metrics.NewTimer() + ContainerStatsTimer = metrics.NewTimer() + ContainersCounter = metrics.NewCounter() + EventSubscriberCounter = metrics.NewCounter() + TasksCounter = metrics.NewCounter() + ExecProcessTimer = metrics.NewTimer() + ExitProcessTimer = metrics.NewTimer() + EpollFdCounter = metrics.NewCounter() +) + +func Metrics() map[string]interface{} { + return map[string]interface{}{ + "container-create-time": ContainerCreateTimer, + "container-delete-time": ContainerDeleteTimer, + "container-start-time": ContainerStartTimer, + "container-stats-time": ContainerStatsTimer, + "containers": ContainersCounter, + "event-subscribers": EventSubscriberCounter, + "tasks": TasksCounter, + "exec-process-time": ExecProcessTimer, + "exit-process-time": ExitProcessTimer, + "epoll-fds": EpollFdCounter, + } +} diff --git a/vendor/github.com/docker/containerd/supervisor/monitor_linux.go b/vendor/github.com/docker/containerd/supervisor/monitor_linux.go new file mode 100644 index 00000000..b1765853 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/monitor_linux.go @@ -0,0 +1,129 @@ +package supervisor + +import ( + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/archutils" + "github.com/docker/containerd/runtime" +) + +func NewMonitor() (*Monitor, error) { + m := &Monitor{ + receivers: make(map[int]interface{}), + exits: make(chan runtime.Process, 1024), + ooms: make(chan string, 1024), + } + fd, err := archutils.EpollCreate1(0) + if err != nil { + return nil, err + } + m.epollFd = fd + go m.start() + return m, nil +} + +type Monitor struct { + m sync.Mutex + receivers map[int]interface{} + exits chan runtime.Process + ooms chan string + epollFd int +} 
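+
+// A minimal consumer of a Monitor (an illustrative sketch, not part of the
+// vendored API) drains both channels so that the epoll loop in start() never
+// blocks on a full channel:
+//
+//	m, _ := NewMonitor()
+//	go func() {
+//		for p := range m.Exits() {
+//			logrus.Infof("process %s exited", p.ID())
+//		}
+//	}()
+//	go func() {
+//		for id := range m.OOMs() {
+//			logrus.Infof("container %s hit an OOM condition", id)
+//		}
+//	}()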
+ +func (m *Monitor) Exits() chan runtime.Process { + return m.exits +} + +func (m *Monitor) OOMs() chan string { + return m.ooms +} + +func (m *Monitor) Monitor(p runtime.Process) error { + m.m.Lock() + defer m.m.Unlock() + fd := p.ExitFD() + event := syscall.EpollEvent{ + Fd: int32(fd), + Events: syscall.EPOLLHUP, + } + if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = p + return nil +} + +func (m *Monitor) MonitorOOM(c runtime.Container) error { + m.m.Lock() + defer m.m.Unlock() + o, err := c.OOM() + if err != nil { + return err + } + fd := o.FD() + event := syscall.EpollEvent{ + Fd: int32(fd), + Events: syscall.EPOLLHUP | syscall.EPOLLIN, + } + if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = o + return nil +} + +func (m *Monitor) Close() error { + return syscall.Close(m.epollFd) +} + +func (m *Monitor) start() { + var events [128]syscall.EpollEvent + for { + n, err := archutils.EpollWait(m.epollFd, events[:], -1) + if err != nil { + if err == syscall.EINTR { + continue + } + logrus.WithField("error", err).Fatal("containerd: epoll wait") + } + // process events + for i := 0; i < n; i++ { + fd := int(events[i].Fd) + m.m.Lock() + r := m.receivers[fd] + switch t := r.(type) { + case runtime.Process: + if events[i].Events == syscall.EPOLLHUP { + delete(m.receivers, fd) + if err = syscall.EpollCtl(m.epollFd, syscall.EPOLL_CTL_DEL, fd, &syscall.EpollEvent{ + Events: syscall.EPOLLHUP, + Fd: int32(fd), + }); err != nil { + logrus.WithField("error", err).Error("containerd: epoll remove fd") + } + if err := t.Close(); err != nil { + logrus.WithField("error", err).Error("containerd: close process IO") + } + EpollFdCounter.Dec(1) + m.exits <- t + } + case runtime.OOM: + // always flush the event fd + t.Flush() + if t.Removed() { + delete(m.receivers, fd) + // epoll will remove the fd from its set after it has been closed + t.Close() + EpollFdCounter.Dec(1) + } else { + m.ooms <- t.ContainerID() + } + } + m.m.Unlock() + } + } +} diff --git a/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go b/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go new file mode 100644 index 00000000..6ad56ac8 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go @@ -0,0 +1,38 @@ +package supervisor + +import ( + "errors" + + "github.com/docker/containerd/runtime" +) + +func NewMonitor() (*Monitor, error) { + return &Monitor{}, errors.New("Monitor NewMonitor() not implemented on Solaris") +} + +type Monitor struct { + ooms chan string +} + +func (m *Monitor) Exits() chan runtime.Process { + return nil +} + +func (m *Monitor) OOMs() chan string { + return m.ooms +} + +func (m *Monitor) Monitor(p runtime.Process) error { + return errors.New("Monitor Monitor() not implemented on Solaris") +} + +func (m *Monitor) MonitorOOM(c runtime.Container) error { + return errors.New("Monitor MonitorOOM() not implemented on Solaris") +} + +func (m *Monitor) Close() error { + return errors.New("Monitor Close() not implemented on Solaris") +} + +func (m *Monitor) start() { +} diff --git a/vendor/github.com/docker/containerd/supervisor/oom.go b/vendor/github.com/docker/containerd/supervisor/oom.go new file mode 100644 index 00000000..e204696e --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/oom.go @@ -0,0 +1,22 @@ +package supervisor + +import ( + "time" + + 
"github.com/Sirupsen/logrus" +) + +type OOMTask struct { + baseTask + ID string +} + +func (s *Supervisor) oom(t *OOMTask) error { + logrus.WithField("id", t.ID).Debug("containerd: container oom") + s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.ID, + Type: StateOOM, + }) + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/signal.go b/vendor/github.com/docker/containerd/supervisor/signal.go new file mode 100644 index 00000000..0705fc56 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/signal.go @@ -0,0 +1,27 @@ +package supervisor + +import "os" + +type SignalTask struct { + baseTask + ID string + PID string + Signal os.Signal +} + +func (s *Supervisor) signal(t *SignalTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + processes, err := i.container.Processes() + if err != nil { + return err + } + for _, p := range processes { + if p.ID() == t.PID { + return p.Signal(t.Signal) + } + } + return ErrProcessNotFound +} diff --git a/vendor/github.com/docker/containerd/supervisor/sort.go b/vendor/github.com/docker/containerd/supervisor/sort.go new file mode 100644 index 00000000..2153b7e4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/sort.go @@ -0,0 +1,27 @@ +package supervisor + +import ( + "sort" + + "github.com/docker/containerd/runtime" +) + +func sortProcesses(p []runtime.Process) { + sort.Sort(&processSorter{p}) +} + +type processSorter struct { + processes []runtime.Process +} + +func (s *processSorter) Len() int { + return len(s.processes) +} + +func (s *processSorter) Swap(i, j int) { + s.processes[i], s.processes[j] = s.processes[j], s.processes[i] +} + +func (s *processSorter) Less(i, j int) bool { + return s.processes[j].ID() == "init" +} diff --git a/vendor/github.com/docker/containerd/supervisor/stats.go b/vendor/github.com/docker/containerd/supervisor/stats.go new file mode 100644 index 00000000..00bf5f82 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/stats.go @@ -0,0 +1,33 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" +) + +type StatsTask struct { + baseTask + ID string + Stat chan *runtime.Stat +} + +func (s *Supervisor) stats(t *StatsTask) error { + start := time.Now() + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + // TODO: use workers for this + go func() { + s, err := i.container.Stats() + if err != nil { + t.ErrorCh() <- err + return + } + t.ErrorCh() <- nil + t.Stat <- s + ContainerStatsTimer.UpdateSince(start) + }() + return errDeferredResponse +} diff --git a/vendor/github.com/docker/containerd/supervisor/supervisor.go b/vendor/github.com/docker/containerd/supervisor/supervisor.go new file mode 100644 index 00000000..8cc253d4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/supervisor.go @@ -0,0 +1,385 @@ +package supervisor + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +const ( + defaultBufferSize = 2048 // size of queue in eventloop +) + +// New returns an initialized Process supervisor. 
+func New(stateDir string, runtimeName, shimName string, runtimeArgs []string, timeout time.Duration, retainCount int) (*Supervisor, error) { + startTasks := make(chan *startTask, 10) + if err := os.MkdirAll(stateDir, 0755); err != nil { + return nil, err + } + machine, err := CollectMachineInformation() + if err != nil { + return nil, err + } + monitor, err := NewMonitor() + if err != nil { + return nil, err + } + s := &Supervisor{ + stateDir: stateDir, + containers: make(map[string]*containerInfo), + startTasks: startTasks, + machine: machine, + subscribers: make(map[chan Event]struct{}), + tasks: make(chan Task, defaultBufferSize), + monitor: monitor, + runtime: runtimeName, + runtimeArgs: runtimeArgs, + shim: shimName, + timeout: timeout, + } + if err := setupEventLog(s, retainCount); err != nil { + return nil, err + } + go s.exitHandler() + go s.oomHandler() + if err := s.restore(); err != nil { + return nil, err + } + return s, nil +} + +type containerInfo struct { + container runtime.Container +} + +func setupEventLog(s *Supervisor, retainCount int) error { + if err := readEventLog(s); err != nil { + return err + } + logrus.WithField("count", len(s.eventLog)).Debug("containerd: read past events") + events := s.Events(time.Time{}) + return eventLogger(s, filepath.Join(s.stateDir, "events.log"), events, retainCount) +} + +func eventLogger(s *Supervisor, path string, events chan Event, retainCount int) error { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755) + if err != nil { + return err + } + go func() { + var ( + count = len(s.eventLog) + enc = json.NewEncoder(f) + ) + for e := range events { + // if we have a specified retain count make sure the truncate the event + // log if it grows past the specified number of events to keep. + if retainCount > 0 { + if count > retainCount { + logrus.Debug("truncating event log") + // close the log file + if f != nil { + f.Close() + } + slice := retainCount - 1 + l := len(s.eventLog) + if slice >= l { + slice = l + } + s.eventLock.Lock() + s.eventLog = s.eventLog[len(s.eventLog)-slice:] + s.eventLock.Unlock() + if f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755); err != nil { + logrus.WithField("error", err).Error("containerd: open event to journal") + continue + } + enc = json.NewEncoder(f) + count = 0 + for _, le := range s.eventLog { + if err := enc.Encode(le); err != nil { + logrus.WithField("error", err).Error("containerd: write event to journal") + } + } + } + } + s.eventLock.Lock() + s.eventLog = append(s.eventLog, e) + s.eventLock.Unlock() + count++ + if err := enc.Encode(e); err != nil { + logrus.WithField("error", err).Error("containerd: write event to journal") + } + } + }() + return nil +} + +func readEventLog(s *Supervisor) error { + f, err := os.Open(filepath.Join(s.stateDir, "events.log")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + dec := json.NewDecoder(f) + for { + var e Event + if err := dec.Decode(&e); err != nil { + if err == io.EOF { + break + } + return err + } + s.eventLog = append(s.eventLog, e) + } + return nil +} + +type Supervisor struct { + // stateDir is the directory on the system to store container runtime state information. 
+ stateDir string + // name of the OCI compatible runtime used to execute containers + runtime string + runtimeArgs []string + shim string + containers map[string]*containerInfo + startTasks chan *startTask + // we need a lock around the subscribers map only because additions and deletions from + // the map are via the API so we cannot really control the concurrency + subscriberLock sync.RWMutex + subscribers map[chan Event]struct{} + machine Machine + tasks chan Task + monitor *Monitor + eventLog []Event + eventLock sync.Mutex + timeout time.Duration +} + +// Stop closes all startTasks and sends a SIGTERM to each container's pid1 then waits for they to +// terminate. After it has handled all the SIGCHILD events it will close the signals chan +// and exit. Stop is a non-blocking call and will return after the containers have been signaled +func (s *Supervisor) Stop() { + // Close the startTasks channel so that no new containers get started + close(s.startTasks) +} + +// Close closes any open files in the supervisor but expects that Stop has been +// callsed so that no more containers are started. +func (s *Supervisor) Close() error { + return nil +} + +type Event struct { + ID string `json:"id"` + Type string `json:"type"` + Timestamp time.Time `json:"timestamp"` + PID string `json:"pid,omitempty"` + Status int `json:"status,omitempty"` +} + +// Events returns an event channel that external consumers can use to receive updates +// on container events +func (s *Supervisor) Events(from time.Time) chan Event { + s.subscriberLock.Lock() + defer s.subscriberLock.Unlock() + c := make(chan Event, defaultBufferSize) + EventSubscriberCounter.Inc(1) + s.subscribers[c] = struct{}{} + if !from.IsZero() { + // replay old event + s.eventLock.Lock() + past := s.eventLog[:] + s.eventLock.Unlock() + for _, e := range past { + if e.Timestamp.After(from) { + c <- e + } + } + // Notify the client that from now on it's live events + c <- Event{ + Type: StateLive, + Timestamp: time.Now(), + } + } + return c +} + +// Unsubscribe removes the provided channel from receiving any more events +func (s *Supervisor) Unsubscribe(sub chan Event) { + s.subscriberLock.Lock() + defer s.subscriberLock.Unlock() + delete(s.subscribers, sub) + close(sub) + EventSubscriberCounter.Dec(1) +} + +// notifySubscribers will send the provided event to the external subscribers +// of the events channel +func (s *Supervisor) notifySubscribers(e Event) { + s.subscriberLock.RLock() + defer s.subscriberLock.RUnlock() + for sub := range s.subscribers { + // do a non-blocking send for the channel + select { + case sub <- e: + default: + logrus.WithField("event", e.Type).Warn("containerd: event not sent to subscriber") + } + } +} + +// Start is a non-blocking call that runs the supervisor for monitoring contianer processes and +// executing new containers. +// +// This event loop is the only thing that is allowed to modify state of containers and processes +// therefore it is save to do operations in the handlers that modify state of the system or +// state of the Supervisor +func (s *Supervisor) Start() error { + logrus.WithFields(logrus.Fields{ + "stateDir": s.stateDir, + "runtime": s.runtime, + "runtimeArgs": s.runtimeArgs, + "memory": s.machine.Memory, + "cpus": s.machine.Cpus, + }).Debug("containerd: supervisor running") + go func() { + for i := range s.tasks { + s.handleTask(i) + } + }() + return nil +} + +// Machine returns the machine information for which the +// supervisor is executing on. 
+func (s *Supervisor) Machine() Machine {
+	return s.machine
+}
+
+// SendTask sends the provided event to the supervisor's main event loop
+func (s *Supervisor) SendTask(evt Task) {
+	TasksCounter.Inc(1)
+	s.tasks <- evt
+}
+
+func (s *Supervisor) exitHandler() {
+	for p := range s.monitor.Exits() {
+		e := &ExitTask{
+			Process: p,
+		}
+		s.SendTask(e)
+	}
+}
+
+func (s *Supervisor) oomHandler() {
+	for id := range s.monitor.OOMs() {
+		e := &OOMTask{
+			ID: id,
+		}
+		s.SendTask(e)
+	}
+}
+
+func (s *Supervisor) monitorProcess(p runtime.Process) error {
+	return s.monitor.Monitor(p)
+}
+
+func (s *Supervisor) restore() error {
+	dirs, err := ioutil.ReadDir(s.stateDir)
+	if err != nil {
+		return err
+	}
+	for _, d := range dirs {
+		if !d.IsDir() {
+			continue
+		}
+		id := d.Name()
+		container, err := runtime.Load(s.stateDir, id, s.timeout)
+		if err != nil {
+			return err
+		}
+		processes, err := container.Processes()
+		if err != nil {
+			return err
+		}
+
+		ContainersCounter.Inc(1)
+		s.containers[id] = &containerInfo{
+			container: container,
+		}
+		if err := s.monitor.MonitorOOM(container); err != nil && err != runtime.ErrContainerExited {
+			logrus.WithField("error", err).Error("containerd: notify OOM events")
+		}
+		logrus.WithField("id", id).Debug("containerd: container restored")
+		var exitedProcesses []runtime.Process
+		for _, p := range processes {
+			if p.State() == runtime.Running {
+				if err := s.monitorProcess(p); err != nil {
+					return err
+				}
+			} else {
+				exitedProcesses = append(exitedProcesses, p)
+			}
+		}
+		if len(exitedProcesses) > 0 {
+			// sort processes so that init is fired last because that is how the kernel sends the
+			// exit events
+			sortProcesses(exitedProcesses)
+			for _, p := range exitedProcesses {
+				e := &ExitTask{
+					Process: p,
+				}
+				s.SendTask(e)
+			}
+		}
+	}
+	return nil
+}
+
+func (s *Supervisor) handleTask(i Task) {
+	var err error
+	switch t := i.(type) {
+	case *AddProcessTask:
+		err = s.addProcess(t)
+	case *CreateCheckpointTask:
+		err = s.createCheckpoint(t)
+	case *DeleteCheckpointTask:
+		err = s.deleteCheckpoint(t)
+	case *StartTask:
+		err = s.start(t)
+	case *DeleteTask:
+		err = s.delete(t)
+	case *ExitTask:
+		err = s.exit(t)
+	case *ExecExitTask:
+		err = s.execExit(t)
+	case *GetContainersTask:
+		err = s.getContainers(t)
+	case *SignalTask:
+		err = s.signal(t)
+	case *StatsTask:
+		err = s.stats(t)
+	case *UpdateTask:
+		err = s.updateContainer(t)
+	case *UpdateProcessTask:
+		err = s.updateProcess(t)
+	case *OOMTask:
+		err = s.oom(t)
+	default:
+		err = ErrUnknownTask
+	}
+	if err != errDeferredResponse {
+		i.ErrorCh() <- err
+		close(i.ErrorCh())
+	}
+}
diff --git a/vendor/github.com/docker/containerd/supervisor/task.go b/vendor/github.com/docker/containerd/supervisor/task.go
new file mode 100644
index 00000000..4d6d1c50
--- /dev/null
+++ b/vendor/github.com/docker/containerd/supervisor/task.go
@@ -0,0 +1,33 @@
+package supervisor
+
+import (
+	"sync"
+
+	"github.com/docker/containerd/runtime"
+)
+
+// StartResponse is the response containing a started container
+type StartResponse struct {
+	Container runtime.Container
+}
+
+// Task executes an action returning an error chan with either nil or
+// the error from executing the task
+type Task interface {
+	// ErrorCh returns a channel used to report an error from an async task
+	ErrorCh() chan error
+}
+
+type baseTask struct {
+	errCh chan error
+	mu    sync.Mutex
+}
+
+func (t *baseTask) ErrorCh() chan error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.errCh == nil {
+		t.errCh = make(chan error, 1)
+	}
+	return t.errCh
+} diff --git a/vendor/github.com/docker/containerd/supervisor/types.go b/vendor/github.com/docker/containerd/supervisor/types.go new file mode 100644 index 00000000..2e36fce0 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/types.go @@ -0,0 +1,12 @@ +package supervisor + +// State constants used in Event types +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateStartProcess = "start-process" + StateOOM = "oom" + StateLive = "live" +) diff --git a/vendor/github.com/docker/containerd/supervisor/update.go b/vendor/github.com/docker/containerd/supervisor/update.go new file mode 100644 index 00000000..fe2f4776 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/update.go @@ -0,0 +1,92 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" +) + +type UpdateTask struct { + baseTask + ID string + State runtime.State + Resources *runtime.Resource +} + +func (s *Supervisor) updateContainer(t *UpdateTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + container := i.container + if t.State != "" { + switch t.State { + case runtime.Running: + if err := container.Resume(); err != nil { + return err + } + s.notifySubscribers(Event{ + ID: t.ID, + Type: StateResume, + Timestamp: time.Now(), + }) + case runtime.Paused: + if err := container.Pause(); err != nil { + return err + } + s.notifySubscribers(Event{ + ID: t.ID, + Type: StatePause, + Timestamp: time.Now(), + }) + default: + return ErrUnknownContainerStatus + } + return nil + } + if t.Resources != nil { + return container.UpdateResources(t.Resources) + } + return nil +} + +type UpdateProcessTask struct { + baseTask + ID string + PID string + CloseStdin bool + Width int + Height int +} + +func (s *Supervisor) updateProcess(t *UpdateProcessTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + processes, err := i.container.Processes() + if err != nil { + return err + } + var process runtime.Process + for _, p := range processes { + if p.ID() == t.PID { + process = p + break + } + } + if process == nil { + return ErrProcessNotFound + } + if t.CloseStdin { + if err := process.CloseStdin(); err != nil { + return err + } + } + if t.Width > 0 || t.Height > 0 { + if err := process.Resize(t.Width, t.Height); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/worker.go b/vendor/github.com/docker/containerd/supervisor/worker.go new file mode 100644 index 00000000..f41818d5 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/worker.go @@ -0,0 +1,74 @@ +package supervisor + +import ( + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +type Worker interface { + Start() +} + +type startTask struct { + Container runtime.Container + Checkpoint string + Stdin string + Stdout string + Stderr string + Err chan error + StartResponse chan StartResponse +} + +func NewWorker(s *Supervisor, wg *sync.WaitGroup) Worker { + return &worker{ + s: s, + wg: wg, + } +} + +type worker struct { + wg *sync.WaitGroup + s *Supervisor +} + +func (w *worker) Start() { + defer w.wg.Done() + for t := range w.s.startTasks { + started := time.Now() + process, err := t.Container.Start(t.Checkpoint, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + "id": t.Container.ID(), + }).Error("containerd: start 
container") + t.Err <- err + evt := &DeleteTask{ + ID: t.Container.ID(), + NoEvent: true, + } + w.s.SendTask(evt) + continue + } + if err := w.s.monitor.MonitorOOM(t.Container); err != nil && err != runtime.ErrContainerExited { + if process.State() != runtime.Stopped { + logrus.WithField("error", err).Error("containerd: notify OOM events") + } + } + if err := w.s.monitorProcess(process); err != nil { + logrus.WithField("error", err).Error("containerd: add process to monitor") + } + ContainerStartTimer.UpdateSince(started) + t.Err <- nil + t.StartResponse <- StartResponse{ + Container: t.Container, + } + w.s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.Container.ID(), + Type: StateStart, + }) + } +} diff --git a/vendor/github.com/docker/containerd/version.go b/vendor/github.com/docker/containerd/version.go new file mode 100644 index 00000000..d516b41e --- /dev/null +++ b/vendor/github.com/docker/containerd/version.go @@ -0,0 +1,11 @@ +package containerd + +import "fmt" + +const VersionMajor = 0 +const VersionMinor = 2 +const VersionPatch = 0 + +var Version = fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) + +var GitCommit = "" diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 00000000..88367b0a --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 00000000..a2082ec0 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,155 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists. +const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. 
+	OSVersion string `json:"os.version,omitempty"`
+
+	// OSFeatures is an optional field specifying an array of strings,
+	// each listing a required OS feature (for example on Windows `win32k`).
+	OSFeatures []string `json:"os.features,omitempty"`
+
+	// Variant is an optional field specifying a variant of the CPU, for
+	// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
+	Variant string `json:"variant,omitempty"`
+
+	// Features is an optional field specifying an array of strings, each
+	// listing a required CPU feature (for example `sse4` or `aes`).
+	Features []string `json:"features,omitempty"`
+}
+
+// A ManifestDescriptor references a platform-specific manifest.
+type ManifestDescriptor struct {
+	distribution.Descriptor
+
+	// Platform specifies which platform the manifest pointed to by the
+	// descriptor runs on.
+	Platform PlatformSpec `json:"platform"`
+}
+
+// ManifestList references manifests for various platforms.
+type ManifestList struct {
+	manifest.Versioned
+
+	// Manifests lists the platform-specific manifest descriptors.
+	Manifests []ManifestDescriptor `json:"manifests"`
+}
+
+// References returns the distribution descriptors for the referenced image
+// manifests.
+func (m ManifestList) References() []distribution.Descriptor {
+	dependencies := make([]distribution.Descriptor, len(m.Manifests))
+	for i := range m.Manifests {
+		dependencies[i] = m.Manifests[i].Descriptor
+	}
+
+	return dependencies
+}
+
+// DeserializedManifestList wraps ManifestList with a copy of the original
+// JSON.
+type DeserializedManifestList struct {
+	ManifestList
+
+	// canonical is the canonical byte representation of the Manifest.
+	canonical []byte
+}
+
+// FromDescriptors takes a slice of descriptors, and returns a
+// DeserializedManifestList which contains the resulting manifest list
+// and its JSON representation.
+func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
+	m := ManifestList{
+		Versioned: SchemaVersion,
+	}
+
+	m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors))
+	copy(m.Manifests, descriptors)
+
+	deserialized := DeserializedManifestList{
+		ManifestList: m,
+	}
+
+	var err error
+	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
+	return &deserialized, err
+}
+
+// UnmarshalJSON populates a new ManifestList struct from JSON data.
+func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
+	m.canonical = make([]byte, len(b), len(b))
+	// store manifest list in canonical
+	copy(m.canonical, b)
+
+	// Unmarshal canonical JSON into ManifestList object
+	var manifestList ManifestList
+	if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
+		return err
+	}
+
+	m.ManifestList = manifestList
+
+	return nil
+}
+
+// MarshalJSON returns the contents of canonical. If canonical is empty,
+// marshals the inner contents.
+func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
+	if len(m.canonical) > 0 {
+		return m.canonical, nil
+	}
+
+	return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
+}
+
+// Payload returns the raw content of the manifest list. The contents can be
+// used to calculate the content identifier.
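+//
+// Illustrative sketch only (not part of the upstream file; "deserialized" is
+// a hypothetical value): the payload can be fed back into digest.FromBytes,
+// imported above, to recompute the content identifier.
+//
+//	_, canonical, err := deserialized.Payload()
+//	if err == nil {
+//		dgst := digest.FromBytes(canonical) // the manifest list's content identifier
+//		_ = dgst
+//	}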
+func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 00000000..b3d1e554 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,281 @@ +package schema1 + +import ( + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
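+//
+// A hypothetical usage sketch (bs, pk, ref, configJSON, layerDescs and ctx
+// are assumed to exist; this example is not part of the upstream file):
+//
+//	mb := NewConfigManifestBuilder(bs, pk, ref, configJSON)
+//	for _, desc := range layerDescs {
+//		if err := mb.AppendReference(desc); err != nil {
+//			return nil, err
+//		}
+//	}
+//	signed, err := mb.Build(ctx)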
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, errors.New("number of descriptors and number of layers in rootfs must match") + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + 
blobsum = mb.descriptors[layerCounter].Digest
+	}
+
+	fsLayerList[0] = FSLayer{BlobSum: blobsum}
+	dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
+
+	// Top-level v1compatibility string should be a modified version of the
+	// image config.
+	transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
+	if err != nil {
+		return nil, err
+	}
+
+	history[0].V1Compatibility = string(transformedConfig)
+
+	tag := ""
+	if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
+		tag = tagged.Tag()
+	}
+
+	mfst := Manifest{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 1,
+		},
+		Name:         mb.ref.Name(),
+		Tag:          tag,
+		Architecture: img.Architecture,
+		FSLayers:     fsLayerList,
+		History:      history,
+	}
+
+	return Sign(&mfst, mb.pk)
+}
+
+// emptyTar pushes a compressed empty tar to the blob store if one doesn't
+// already exist, and returns its blobsum.
+func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
+	if mb.emptyTarDigest != "" {
+		// Already put an empty tar
+		return mb.emptyTarDigest, nil
+	}
+
+	descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
+	switch err {
+	case nil:
+		mb.emptyTarDigest = descriptor.Digest
+		return descriptor.Digest, nil
+	case distribution.ErrBlobUnknown:
+		// nop
+	default:
+		return "", err
+	}
+
+	// Add gzipped empty tar to the blob store
+	descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
+	if err != nil {
+		return "", err
+	}
+
+	mb.emptyTarDigest = descriptor.Digest
+
+	return descriptor.Digest, nil
+}
+
+// AppendReference adds a reference to the current ManifestBuilder
+func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
+	// todo: verification here?
+	mb.descriptors = append(mb.descriptors, d.Descriptor())
+	return nil
+}
+
+// References returns the current references added to this builder
+func (mb *configManifestBuilder) References() []distribution.Descriptor {
+	return mb.descriptors
+}
+
+// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON
+func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+	// Top-level v1compatibility string should be a modified version of the
+	// image config.
+	var configAsMap map[string]*json.RawMessage
+	if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
+		return nil, err
+	}
+
+	// Delete fields that didn't exist in old manifest
+	delete(configAsMap, "rootfs")
+	delete(configAsMap, "history")
+	configAsMap["id"] = rawJSON(v1ID)
+	if parentV1ID != "" {
+		configAsMap["parent"] = rawJSON(parentV1ID)
+	}
+	if throwaway {
+		configAsMap["throwaway"] = rawJSON(true)
+	}
+
+	return json.Marshal(configAsMap)
+}
+
+func rawJSON(value interface{}) *json.RawMessage {
+	jsonval, err := json.Marshal(value)
+	if err != nil {
+		return nil
+	}
+	return (*json.RawMessage)(&jsonval)
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go
new file mode 100644
index 00000000..bff47bde
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go
@@ -0,0 +1,184 @@
+package schema1
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/libtrust"
+)
+
+const (
+	// MediaTypeManifest specifies the mediaType for the current version. Note
+	// that for schema version 1, the media type is optionally "application/json".
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
+	// MediaTypeSignedManifest specifies the mediatype for current SignedManifest version
+	MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	// MediaTypeManifestLayer specifies the media type for manifest layers
+	MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
+)
+
+var (
+	// SchemaVersion provides a pre-initialized version structure for this
+	// package's version of the manifest.
+	SchemaVersion = manifest.Versioned{
+		SchemaVersion: 1,
+	}
+)
+
+func init() {
+	schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		sm := new(SignedManifest)
+		err := sm.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		desc := distribution.Descriptor{
+			Digest:    digest.FromBytes(sm.Canonical),
+			Size:      int64(len(sm.Canonical)),
+			MediaType: MediaTypeSignedManifest,
+		}
+		return sm, desc, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+	err = distribution.RegisterManifestSchema("", schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+	err = distribution.RegisterManifestSchema("application/json", schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// FSLayer is a container struct for BlobSums defined in an image manifest
+type FSLayer struct {
+	// BlobSum is the tarsum of the referenced filesystem image layer
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+// History stores unstructured v1 compatibility information
+type History struct {
+	// V1Compatibility is the raw v1 compatibility information
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Manifest provides the base accessible fields for working with V2 image
+// format in the registry.
+type Manifest struct {
+	manifest.Versioned
+
+	// Name is the name of the image's repository
+	Name string `json:"name"`
+
+	// Tag is the tag of the image specified by this manifest
+	Tag string `json:"tag"`
+
+	// Architecture is the host architecture on which this image is intended to
+	// run
+	Architecture string `json:"architecture"`
+
+	// FSLayers is a list of filesystem layer blobSums contained in this image
+	FSLayers []FSLayer `json:"fsLayers"`
+
+	// History is a list of unstructured historical data for v1 compatibility
+	History []History `json:"history"`
+}
+
+// SignedManifest provides an envelope for a signed image manifest, including
+// the format-sensitive raw bytes.
+type SignedManifest struct {
+	Manifest
+
+	// Canonical is the canonical byte representation of the ImageManifest,
+	// without any attached signatures. The manifest byte
+	// representation cannot change or it will have to be re-signed.
+	Canonical []byte `json:"-"`
+
+	// all contains the byte representation of the Manifest including signatures
+	// and is returned by Payload()
+	all []byte
+}
+
+// UnmarshalJSON populates a new SignedManifest struct from JSON data.
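+//
+// For example (an illustrative sketch, not part of the upstream file; raw is
+// assumed to hold signed-manifest bytes), a decoded manifest can then be
+// checked with Verify from this package:
+//
+//	var sm SignedManifest
+//	if err := sm.UnmarshalJSON(raw); err != nil {
+//		return err
+//	}
+//	keys, err := Verify(&sm)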
+func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
+	sm.all = make([]byte, len(b), len(b))
+	// store manifest and signatures in all
+	copy(sm.all, b)
+
+	jsig, err := libtrust.ParsePrettySignature(b, "signatures")
+	if err != nil {
+		return err
+	}
+
+	// Resolve the payload in the manifest.
+	bytes, err := jsig.Payload()
+	if err != nil {
+		return err
+	}
+
+	// sm.Canonical stores the canonical manifest JSON
+	sm.Canonical = make([]byte, len(bytes), len(bytes))
+	copy(sm.Canonical, bytes)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
+		return err
+	}
+
+	sm.Manifest = manifest
+
+	return nil
+}
+
+// References returns the descriptors of this manifest's references
+func (sm SignedManifest) References() []distribution.Descriptor {
+	dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
+	for i, fsLayer := range sm.FSLayers {
+		dependencies[i] = distribution.Descriptor{
+			MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
+			Digest:    fsLayer.BlobSum,
+		}
+	}
+
+	return dependencies
+}
+
+// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
+// contents. Applications requiring a marshaled signed manifest should simply
+// use Raw directly, since the content produced by json.Marshal will be
+// compacted and will fail signature checks.
+func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
+	if len(sm.all) > 0 {
+		return sm.all, nil
+	}
+
+	// If the raw data is not available, just dump the inner content.
+	return json.Marshal(&sm.Manifest)
+}
+
+// Payload returns the signed content of the signed manifest.
+func (sm SignedManifest) Payload() (string, []byte, error) {
+	return MediaTypeSignedManifest, sm.all, nil
+}
+
+// Signatures returns the signatures as provided by
+// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
+// signatures.
+func (sm *SignedManifest) Signatures() ([][]byte, error) {
+	jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	// Resolve the payload in the manifest.
+	return jsig.Signatures()
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go
new file mode 100644
index 00000000..fc1045f9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go
@@ -0,0 +1,98 @@
+package schema1
+
+import (
+	"fmt"
+
+	"errors"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/libtrust"
+)
+
+// referenceManifestBuilder is a type for constructing manifests from schema1
+// dependencies.
+type referenceManifestBuilder struct {
+	Manifest
+	pk libtrust.PrivateKey
+}
+
+// NewReferenceManifestBuilder is used to build new manifests for the current
+// schema version using schema1 dependencies.
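+//
+// A minimal sketch (pk, ref, layerDigest, v1JSON and ctx are assumed;
+// illustrative only, not part of the upstream file). Note that
+// AppendReference prepends, so layers are added leaf-first:
+//
+//	mb := NewReferenceManifestBuilder(pk, ref, "amd64")
+//	err := mb.AppendReference(Reference{
+//		Digest:  layerDigest,
+//		History: History{V1Compatibility: v1JSON},
+//	})
+//	m, err := mb.Build(ctx)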
+func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { + tag := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: ref.Name(), + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("Unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. + History History +} + +// Descriptor describes a reference +func (r Reference) Descriptor() distribution.Descriptor { + return distribution.Descriptor{ + MediaType: MediaTypeManifestLayer, + Digest: r.Digest, + Size: r.Size, + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go new file mode 100644 index 00000000..c862dd81 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/sign.go @@ -0,0 +1,68 @@ +package schema1 + +import ( + "crypto/x509" + "encoding/json" + + "github.com/docker/libtrust" +) + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. This typically won't be used within the registry, except +// for testing. +func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} + +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. 
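+//
+// An illustrative sketch (key, chain, mfst and pool are assumed to exist; not
+// part of the upstream file). The result can be checked against a CA pool
+// with VerifyChains from this package:
+//
+//	sm, err := SignWithChain(&mfst, key, chain)
+//	if err != nil {
+//		return err
+//	}
+//	chains, err := VerifyChains(sm, pool)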
+func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
+	p, err := json.MarshalIndent(m, "", "   ")
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := libtrust.NewJSONSignature(p)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := js.SignWithChain(key, chain); err != nil {
+		return nil, err
+	}
+
+	pretty, err := js.PrettySignature("signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	return &SignedManifest{
+		Manifest:  *m,
+		all:       pretty,
+		Canonical: p,
+	}, nil
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go
new file mode 100644
index 00000000..fa8daa56
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema1/verify.go
@@ -0,0 +1,32 @@
+package schema1
+
+import (
+	"crypto/x509"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/libtrust"
+)
+
+// Verify verifies the signature of the signed manifest returning the public
+// keys used during signing.
+func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
+	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
+		return nil, err
+	}
+
+	return js.Verify()
+}
+
+// VerifyChains verifies the signature of the signed manifest against the
+// certificate pool returning the list of verified chains. Signatures without
+// an x509 chain are not checked.
+func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
+	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	return js.VerifyChains(ca)
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go
new file mode 100644
index 00000000..44b94eaa
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go
@@ -0,0 +1,77 @@
+package schema2
+
+import (
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+// builder is a type for constructing manifests.
+type builder struct {
+	// bs is a BlobService used to publish the configuration blob.
+	bs distribution.BlobService
+
+	// configJSON is the image configuration, published as a blob by Build.
+	configJSON []byte
+
+	// layers is a list of layer descriptors that gets built by successive
+	// calls to AppendReference.
+	layers []distribution.Descriptor
+}
+
+// NewManifestBuilder is used to build new manifests for the current schema
+// version. It takes a BlobService so it can publish the configuration blob
+// as part of the Build process.
+func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder {
+	mb := &builder{
+		bs:         bs,
+		configJSON: make([]byte, len(configJSON)),
+	}
+	copy(mb.configJSON, configJSON)
+
+	return mb
+}
+
+// Build produces a final manifest from the given references.
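+//
+// A hypothetical end-to-end sketch (bs, configJSON, layers and ctx are
+// assumed; illustrative only, not part of the upstream file):
+//
+//	mb := NewManifestBuilder(bs, configJSON)
+//	for _, layer := range layers {
+//		if err := mb.AppendReference(layer); err != nil {
+//			return nil, err
+//		}
+//	}
+//	m, err := mb.Build(ctx)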
+func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
+	m := Manifest{
+		Versioned: SchemaVersion,
+		Layers:    make([]distribution.Descriptor, len(mb.layers)),
+	}
+	copy(m.Layers, mb.layers)
+
+	configDigest := digest.FromBytes(mb.configJSON)
+
+	var err error
+	m.Config, err = mb.bs.Stat(ctx, configDigest)
+	switch err {
+	case nil:
+		return FromStruct(m)
+	case distribution.ErrBlobUnknown:
+		// nop
+	default:
+		return nil, err
+	}
+
+	// Add config to the blob store
+	m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
+	// Override MediaType, since Put always replaces the specified media
+	// type with application/octet-stream in the descriptor it returns.
+	m.Config.MediaType = MediaTypeConfig
+	if err != nil {
+		return nil, err
+	}
+
+	return FromStruct(m)
+}
+
+// AppendReference adds a reference to the current ManifestBuilder.
+func (mb *builder) AppendReference(d distribution.Describable) error {
+	mb.layers = append(mb.layers, d.Descriptor())
+	return nil
+}
+
+// References returns the current references added to this builder.
+func (mb *builder) References() []distribution.Descriptor {
+	return mb.layers
+}
diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go
new file mode 100644
index 00000000..8d378e99
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go
@@ -0,0 +1,125 @@
+package schema2
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+)
+
+const (
+	// MediaTypeManifest specifies the mediaType for the current version.
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
+
+	// MediaTypeConfig specifies the mediaType for the image configuration.
+	MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
+
+	// MediaTypeLayer is the mediaType used for layers referenced by the
+	// manifest.
+	MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+)
+
+var (
+	// SchemaVersion provides a pre-initialized version structure for this
+	// package's version of the manifest.
+	SchemaVersion = manifest.Versioned{
+		SchemaVersion: 2,
+		MediaType:     MediaTypeManifest,
+	}
+)
+
+func init() {
+	schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		m := new(DeserializedManifest)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// Manifest defines a schema2 manifest.
+type Manifest struct {
+	manifest.Versioned
+
+	// Config references the image configuration as a blob.
+	Config distribution.Descriptor `json:"config"`
+
+	// Layers lists descriptors for the layers referenced by the
+	// configuration.
+	Layers []distribution.Descriptor `json:"layers"`
+}
+
+// References returns the descriptors of this manifest's references.
+func (m Manifest) References() []distribution.Descriptor {
+	return m.Layers
+}
+
+// Target returns the target of this manifest.
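+//
+// For example (an illustrative sketch; bs is an assumed
+// distribution.BlobService and ctx an assumed context), the referenced image
+// configuration could be fetched from a blob store via the target descriptor:
+//
+//	cfg := m.Target()
+//	configJSON, err := bs.Get(ctx, cfg.Digest)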
+func (m Manifest) Target() distribution.Descriptor {
+	return m.Config
+}
+
+// DeserializedManifest wraps Manifest with a copy of the original JSON.
+// It satisfies the distribution.Manifest interface.
+type DeserializedManifest struct {
+	Manifest
+
+	// canonical is the canonical byte representation of the Manifest.
+	canonical []byte
+}
+
+// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
+// DeserializedManifest which contains the manifest and its JSON representation.
+func FromStruct(m Manifest) (*DeserializedManifest, error) {
+	var deserialized DeserializedManifest
+	deserialized.Manifest = m
+
+	var err error
+	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
+	return &deserialized, err
+}
+
+// UnmarshalJSON populates a new Manifest struct from JSON data.
+func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
+	m.canonical = make([]byte, len(b), len(b))
+	// store manifest in canonical
+	copy(m.canonical, b)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(m.canonical, &manifest); err != nil {
+		return err
+	}
+
+	m.Manifest = manifest
+
+	return nil
+}
+
+// MarshalJSON returns the contents of canonical. If canonical is empty,
+// marshals the inner contents.
+func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
+	if len(m.canonical) > 0 {
+		return m.canonical, nil
+	}
+
+	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
+}
+
+// Payload returns the raw content of the manifest. The contents can be used to
+// calculate the content identifier.
+func (m DeserializedManifest) Payload() (string, []byte, error) {
+	return m.MediaType, m.canonical, nil
+}
diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go
new file mode 100644
index 00000000..c57398bd
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/versioned.go
@@ -0,0 +1,12 @@
+package manifest
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type Versioned struct {
+	// SchemaVersion is the image manifest schema that this image follows
+	SchemaVersion int `json:"schemaVersion"`
+
+	// MediaType is the media type of this schema.
+ MediaType string `json:"mediaType,omitempty"` +} diff --git a/vendor/github.com/docker/docker/.dockerignore b/vendor/github.com/docker/docker/.dockerignore index 9bd2c021..6e43c2a9 100644 --- a/vendor/github.com/docker/docker/.dockerignore +++ b/vendor/github.com/docker/docker/.dockerignore @@ -1,3 +1,4 @@ -bundles -.gopath -vendor/pkg +./bin +./.dapper +./dist +./.trash-cache diff --git a/vendor/github.com/docker/docker/.gitignore b/vendor/github.com/docker/docker/.gitignore index 43aa9227..d43bb90f 100644 --- a/vendor/github.com/docker/docker/.gitignore +++ b/vendor/github.com/docker/docker/.gitignore @@ -1,30 +1,4 @@ -# Docker project generated files to ignore -# if you want to ignore files created by your editor/tools, -# please consider a global .gitignore https://help.github.com/articles/ignoring-files -*.exe -*.exe~ -*.orig -*.test -.*.swp -.DS_Store -# a .bashrc may be added to customize the build environment -.bashrc -.gopath/ -autogen/ -bundles/ -cmd/dockerd/dockerd -cmd/docker/docker -dockerversion/version_autogen.go -docs/AWS_S3_BUCKET -docs/GITCOMMIT -docs/GIT_BRANCH -docs/VERSION -docs/_build -docs/_static -docs/_templates -docs/changed-files -# generated by man/md2man-all.sh -man/man1 -man/man5 -man/man8 -vendor/pkg/ +/.dapper +/bin +*.swp +/.trash-cache diff --git a/vendor/github.com/docker/docker/.mailmap b/vendor/github.com/docker/docker/.mailmap deleted file mode 100644 index 685f8e6b..00000000 --- a/vendor/github.com/docker/docker/.mailmap +++ /dev/null @@ -1,237 +0,0 @@ -# Generate AUTHORS: hack/generate-authors.sh - -# Tip for finding duplicates (besides scanning the output of AUTHORS for name -# duplicates that aren't also email duplicates): scan the output of: -# git log --format='%aE - %aN' | sort -uf -# -# For explanation on this file format: man git-shortlog - -Patrick Stapleton -Shishir Mahajan -Erwin van der Koogh -Ahmed Kamal -Tejesh Mehta -Cristian Staretu -Cristian Staretu -Cristian Staretu -Marcus Linke -Aleksandrs Fadins -Christopher Latham -Hu Keping -Wayne Chang -Chen Chao -Daehyeok Mun - - - - - - -Guillaume J. Charmes - - - - - -Thatcher Peskens -Thatcher Peskens -Thatcher Peskens dhrp -Jérôme Petazzoni jpetazzo -Jérôme Petazzoni -Joffrey F -Joffrey F -Joffrey F -Tim Terhorst -Andy Smith - - - - - - - - - -Walter Stanish - -Roberto Hashioka -Konstantin Pelykh -David Sissitka -Nolan Darilek - -Benoit Chesneau -Jordan Arentsen -Daniel Garcia -Miguel Angel Fernández -Bhiraj Butala -Faiz Khan -Victor Lyuboslavsky -Jean-Baptiste Barth -Matthew Mueller - -Shih-Yuan Lee -Daniel Mizyrycki root -Jean-Baptiste Dalido - - - - - - - - - - - - - - -Sven Dowideit -Sven Dowideit -Sven Dowideit -Sven Dowideit <¨SvenDowideit@home.org.au¨> -Sven Dowideit -Sven Dowideit - -Alexander Morozov -Alexander Morozov - -O.S. Tezer - -Roberto G. 
Hashioka - - - - - -Sridhar Ratnakumar -Sridhar Ratnakumar -Liang-Chi Hsieh -Aleksa Sarai -Aleksa Sarai -Aleksa Sarai -Will Weaver -Timothy Hobbs -Nathan LeClaire -Nathan LeClaire - - - - -Matthew Heon - - - - -Francisco Carriedo - - - - -Brian Goff - - - -Hollie Teal - - - -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle - - - - -Thomas LEVEIL Thomas LÉVEIL - - -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Darren Shepherd -Deshi Xiao -Deshi Xiao -Doug Davis -Jacob Atzen -Jeff Nickoloff -John Howard (VM) -John Howard (VM) -John Howard (VM) -Madhu Venugopal -Mary Anthony -Mary Anthony moxiegirl -Mary Anthony -mattyw -resouer -AJ Bowen soulshake -AJ Bowen soulshake -Tibor Vass -Tibor Vass -Vincent Bernat -Yestin Sun -bin liu -John Howard (VM) jhowardmsft -Ankush Agarwal -Tangi COLIN tangicolin -Allen Sun -Adrien Gallouët - -Anuj Bahuguna -Anusha Ragunathan -Avi Miller -Brent Salisbury -Chander G -Chun Chen -Ying Li -Daehyeok Mun - -Daniel, Dao Quang Minh -Daniel Nephin -Dave Tucker -Doug Tangren -Frederick F. Kautz IV -Ben Golub -Harold Cooper -hsinko <21551195@zju.edu.cn> -Josh Hawn -Justin Cormack - - -Kamil Domański -Lei Jitang - -Linus Heckemann - -Lynda O'Leary - -Marianna Tessel -Michael Huettermann -Moysés Borges - -Nigel Poulton -Qiang Huang - -Boaz Shuster -Shuwei Hao - -Soshi Katsuta - -Stefan Berger - -Stephen Day - -Toli Kuznets -Tristan Carel - -Vincent Demeester - -Vishnu Kannan -xlgao-zju xlgao -yuchangchun y00277921 - - diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index bfb128ca..00000000 --- a/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,1460 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron Lehmann -Aaron Welch -Abel Muiño -Abhijeet Kasurde -Abhinav Ajgaonkar -Abhishek Chanda -Abin Shahab -Adam Miller -Adam Singer -Aditi Rajagopal -Aditya -Adria Casas -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akihiro Suda -Al Tobey -alambike -Alan Thompson -Albert Callarisa -Albert Zhang -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Crawford -Alex Gaynor -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre González -Alexandru Sfirlogea -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Allen Madsen -Allen Sun -almoehi -Alvin Richards -amangoel -Amen Belayneh -Amit Bakshi -Amit Krishnan -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew Munsell -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Ankush Agarwal -Anonmily -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -ArikaChen -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Firshman -Ben Golub -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benoit Chesneau -Bernerd Schaefer -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -Bill W -bin liu -Blake Geno -Boaz Shuster -bobby abbott -boucher -Bouke Haarsma -Boyd Hemphill -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Kochendorfer -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Shumate -Brian Torres-Gil -Brice Jaglin -Briehan Lombaard -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -buddhamagnet -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Carl Henrik Lunde -Carl X. Su -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander G -Charles Chan -Charles Hooper -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Hanxiao -cheney90 -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dituri -Chris Fordham -Chris Khoo -Chris McKinnel -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Wahl -Chris Weyl -chrismckinnel -Christian Berendt -Christian Böhme -Christian Rotzoll -Christian Simon -Christian Stefanescu -ChristoperBiscardi -Christophe Mehay -Christophe Troestler -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. 
Costa -Clayton Coleman -Clinton Kitson -Coenraad Loubser -Colin Dunklau -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Cory Forsyth -cressie176 -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams -Daniel Antlinger -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Hiltgen -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel YC Lin -Daniel Zhang -Daniel, Dao Quang Minh -Danny Berger -Danny Yates -Darren Coxall -Darren Shepherd -Darren Stahl -Dave Barboza -Dave Henderson -Dave Tucker -David Anderson -David Calavera -David Corking -David Cramer -David Currie -David Davis -David Gageot -David Gebler -David Lawrence -David Mackey -David Mat -David Mcanulty -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Xia -David Young -Davide Ceretti -Dawn Chen -dcylabs -decadent -Deng Guangxing -Deni Bertovic -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dieter Reuter -Dima Stopel -Dimitri John Ledkov -Dinesh Subhraveti -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitry Demeshchuk -Dmitry Gusev -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donovan Jones -Doug Davis -Doug MacEachern -Doug Tangren -Dr Nic Williams -dragon788 -Dražen Lučanin -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivind Uggedal -Elan Ruusamäe -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Windisch -Eric-Olivier Lamey -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Euan -Eugene Yakubovich -eugenkrizo -evalle -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Fabiano Rosas -Fabio Falci -Fabio Rehm -Fabrizio Regini -Faiz Khan -falmp -Fangyuan Gao <21551127@zju.edu.cn> -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Schindler -Ferenc Szabo -Fernando -Fero Volar -Filipe Brandenburger -Filipe Oliveira -fl0yd -Flavio Castelli -FLGMwt -Florian Klein -Florian Maier -Florian Weingarten -Florin Asavoaie -Francesc Campoy -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. 
Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Monroy -GabrielNicolasAvellaneda -Galen Sampson -Gareth Rushgrove -Garrett Barboza -Gaurav -gautam, prasanna -GennadySpb -Geoffrey Bachelet -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gianluca Borello -Gildas Cuisinier -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Thornton -grossws -grunny -gs11 -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Gurjeet Singh -Guruprasad -gwx296173 -Günter Zöchbauer -Hans Kristian Flaatten -Hans Rødtang -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harry Zhang -He Simei -heartlock <21521209@zju.edu.cn> -Hector Castro -Henning Sprang -Hobofan -Hollie Teal -Hong Xu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -hyp3rdino -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Lee -Ian Main -Ian Truslove -Iavael -Icaro Seara -Igor Dolzhikov -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -ILYA Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Isaac Dupree -Isabel Jimenez -Isao Jonas -Ivan Babrou -Ivan Fraixedes -J Bruni -J. Nunn -Jack Danger Canty -Jacob Atzen -Jacob Edelman -Jake Champlin -Jake Moshenko -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nugent -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Januar Wayong -Jared Biel -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Paul Calderone -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -jgeiger -Jian Zhang -jianbosun -Jilles Oldenbeuving -Jim Alateras -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -jimmyxian -Jinsoo Park -Jiri Popelka -Jiří Župka -jjy -jmzwcn -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Howard (VM) -John OBrien III -John Starks -John Tims -John Warwick -John Willis -Jon Wedaman -Jonas Pfenniger -Jonathan A. 
Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Dowland -Jonathan Lebon -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Joost Cassee -Jordan -Jordan Arentsen -Jordan Sissel -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Josh -Josh Hawn -Josh Poimboeuf -Josiah Kiehl -José Tomás Albornoz -JP -jrabbit -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Pervillé -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Plock -Justin Simonelis -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu(Kennan) -Kamil Domański -Kanstantsin Shautsou -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -kayrus -Keli Hu -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickael Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Clark -Kevin J. Lynagh -Kevin Menard -Kevin P. Kucharczyk -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Pelykh -Krasimir Georgiev -Kristian Haugene -Kristina Zabunova -krrg -Kun Zhang -Kunal Kushwaha -Kyle Conroy -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -lalyos -Lance Chen -Lance Kinley -Lars Kellogg-Stedman -Lars R. Damerow -Laszlo Meszaros -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee, Meng-Han -leeplay -Lei Jitang -Len Weincier -Lennie -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Marshall -Lewis Peckover -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liaoqingwei -limsy -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -LIZAO LI -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Louis Opter -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Luis Martínez de Bartolomé Izquierdo -Lukas Waslowski -lukaspustina -lukemarsden -Lynda O'Leary -Lénaïc Huard -Ma Shimiao -Mabin -Madhav Puri -Madhu Venugopal -Mageee <21521230.zju.edu.cn> -Mahesh Tiyyagura -malnick -Malte Janduda -manchoz -Manfred Touron -Manfred Zabarauskas -Manuel Meurer -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcelo Salazar -Marco Hennings -Marcus Farkas -Marcus Linke -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark McGranaghan -Mark McKinstry -Mark West -Marko Mikulicic -Marko Tibold -Markus Fix -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Redmond -Mary Anthony -Masahito Zembutsu -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Le Marec - Pasquet -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt McCormick -Matt Moore -Matt Robenolt -Matthew Heon -Matthew Mayer -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -mattymo -mattyw -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mengdi Gao -Mert Yazıcıoğlu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Gorsuch -Michael Grauer -Michael Hudson-Doyle -Michael Huettermann -Michael Käufl -Michael Neale -Michael Prokop -Michael Scharf -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minar -Michaël Pailloncy -Michał Czeraszkiewicz -Michiel@unhosted -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Gaffney -Mike Goelzer -Mike Leone -Mike MacCana -Mike Naberezny -Mike Snitzer -Mikhail Sobolev -Miloslav Trmač -mingqing -Mingzhen Feng -Mitch Capper -mlarcher -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohit Soni -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mqliang -Mrunal Patel -msabansal -mschurenko -Mustafa Akın -Muthukumar R -Médi-Rémi Hashim -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Neal McBurnett -Nelson Chen -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Irvine -Nick Parker -Nick Payne -Nick Stenning -Nick Stinemates -Nicolas Borboën -Nicolas De loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolás Hock Isaza -Nigel Poulton -NikolaMandic -nikolas -Nishant Totla -NIWA Hideyuki -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -OddBloke -odk- -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -Ole Reifschneider -Oliver Neal -Olivier Gambier -Olle Jonsson -Oriol Francès -Otto Kekäläinen -oyld -ozlerhakan -paetling -pandrew -panticz -Paolo G. Giarrusso -Pascal Borreli -Pascal Hartig -Patrick Devine -Patrick Hemmer -Patrick Stapleton -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Hammond -Paul Jimenez -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Pavel Lobashov -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Peeyush Gupta -Peggy Li -Pei Su -Penghan Wang -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Phil -Phil Estes -Phil Spitler -Philip Monroe -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Prasanna Gautam -Prayag Verma -Przemek Hejman -pysqz -qg <1373319223@qq.com> -qhuang -Qiang Huang -qq690388648 <690388648@qq.com> -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Rajat Pandit -Rajdeep Dua -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon van Alteren -Ray Tsang -ReadmeCritic -Recursive Madman -Regan McCooey -Remi Rampin -Renato Riccieri Santos Zannon -resouer -rgstephens -Rhys Hiltner -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Stern -Robert Wallis -Roberto G. 
Hashioka -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Roland Huß -Roland Moriz -Roma Sokolov -Roman Strashkin -Ron Smits -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Rozhnov Alexandr -rsmoorthy -Rudolph Gottesheim -Rui Lopes -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -RyanDeng -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Satnam Singh -satoru -Satoshi Amemiya -scaleoutsean -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Cronin -Sean OMeara -Sean P. Kane -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -SeongJae Park -Seongyeol Lim -Sergey Alekseev -Sergey Evstifeev -Sevki Hasirci -Shane Canon -Shane da Silva -shaunol -Shawn Landden -Shawn Siefkas -Shekhar Gulati -Sheng Yang -Shih-Yuan Lee -Shijiang Wei -Shishir Mahajan -shuai-z -Shuwei Hao -Sian Lerk Lau -sidharthamani -Silas Sewell -Simei He -Simon Eskildsen -Simon Leinen -Simon Taranto -Sindhu S -Sjoerd Langkemper -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srini Brahmaroutu -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephen Crosby -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Sujith Haridasan -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq -TAGOMORI Satoshi -tang0th -Tangi COLIN -Tatsuki Sugiura -Tatsushi Inagaki -Taylor Jones -tbonza -Ted M. Young -Tehmasp Chaudhri -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Hansen -Thomas LEVEIL -Thomas Orozco -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Texier -Tianon Gravi -Tibor Vass -Tiffany Low -Tim Bosse -Tim Dettrick -Tim Hockin -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom X. Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonis Tiigi -Tonny Xu -Tony Daws -Tony Miller -toogley -Torstein Husebø -tpng -tracylihui <793912329@qq.com> -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -trishnaguha -Tristan Carel -Troy Denton -Tyler Brock -Tzu-Jung Lee -Tõnis Tiigi -Ulysse Carion -unknown -vagrant -Vaidas Jablonskis -Veres Lajos -vgeta -Victor Coisne -Victor Costan -Victor I. 
Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -WANG Chao -Ward Vandewege -WarheadsSE -Wayne Chang -Wei-Ting Kuo -weiyan -Weiyang Zhu -Wen Cheng Ma -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wes Morgan -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Riancho -William Thurston -WiseTrem -wlan0 -Wolfgang Powisch -wonderflow -xamyzhao -XiaoBing Jiang -Xiaoxu Chen -xiekeyang -Xinzi Zhou -Xiuming Chen -xlgao-zju -xuzhaokui -Yahya -YAMADA Tsuyoshi -Yan Feng -Yang Bai -yangshukui -Yasunori Mahata -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongzhi Pan -Youcef YEKHLEF -Yuan Sun -yuchangchun -yuchengxia -Yurii Rashkovskii -yuzou -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -Zhenan Ye <21551168@zju.edu.cn> -Zhu Guihua -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -zqh -Zuhayr Elahi -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 diff --git a/vendor/github.com/docker/docker/CHANGELOG.md b/vendor/github.com/docker/docker/CHANGELOG.md deleted file mode 100644 index ffdeccfb..00000000 --- a/vendor/github.com/docker/docker/CHANGELOG.md +++ /dev/null @@ -1,2415 +0,0 @@ -# Changelog - -Items starting with `DEPRECATE` are important deprecation notices. For more -information on the list of deprecated flags and APIs please have a look at -https://docs.docker.com/engine/deprecated/ where target removal dates can also -be found. 
- -## 1.11.1 (2016-04-26) - -### Distribution - -- Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949)) - -### Documentation - -+ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048)) - -### Builder - -* Append labels passed to `docker build` as arguments as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184)) - -### Networking - -- Fix a panic that would occur when forwarding a DNS query ([#22261](https://github.com/docker/docker/pull/22261)) -- Fix an issue where OS threads could end up within an incorrect network namespace when using user-defined networks ([#22261](https://github.com/docker/docker/pull/22261)) - -### Runtime - -- Fix a bug preventing label configuration from being reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299)) -- Fix a regression where a container mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256)) -- Fix an issue where it would be impossible to update both `memory-swap` and `memory` values together ([#22255](https://github.com/docker/docker/pull/22255)) -- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254)) -- Add missing cleanup of container temporary files when cancelling a scheduled restart ([#22237](https://github.com/docker/docker/pull/22237)) -- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993)) -- Fix a panic that would occur when the plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191)) -- Fix restart backoff logic to correctly reset delay if container ran for at least 10 seconds ([#22125](https://github.com/docker/docker/pull/22125)) -- Remove error message when a container restart gets cancelled ([#22123](https://github.com/docker/docker/pull/22123)) -- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121)) -- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120)) -- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065)) -- Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058)) - -## 1.11.0 (2016-04-13) - -**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binary, please make sure to update them. Interaction with the daemon stays the same; otherwise, the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`.
- -### Builder - -- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033)) -- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162)) - -### Client - -* Usage of the `:` separator for security options has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232)) -+ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373)) -* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200)) -* Docker info will now warn users if it cannot detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128)) -- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803)) -- Fix a bug where some newly started containers would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792)) -* Post processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587)) -- Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566)) -+ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373)) -+ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107)) -* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017)) -* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986)) -- Docker now prohibits logging in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806)) -* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761)) -* `docker ps` no longer shows exported ports for stopped containers ([#19483](https://github.com/docker/docker/pull/19483)) -- Docker now cleans up after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849)) -* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#20078](https://github.com/docker/docker/pull/20078))
for an email ([#20565](https://github.com/docker/docker/pull/20565)) -* Docker will now fall back to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241)) -* Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840)) -- Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509)) -- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382)) - -### Logging - -- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311)) -* Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121)) -* Docker GELF log driver now allows specifying the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options ([#19831](https://github.com/docker/docker/pull/19831)) -* Docker daemon learned to output uncolorized logs via the `--raw-logs` option ([#19794](https://github.com/docker/docker/pull/19794)) -+ Docker, on the Windows platform, now includes an ETW (Event Tracing for Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689)) -* Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564)) -+ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439)) -+ Docker learned to send logs to Google Cloud via the new `gcplogs` logging driver.
([#18766](https://github.com/docker/docker/pull/18766)) - - -### Misc - -+ When saving linked images together with `docker save`, a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385)) -+ Support for building the Docker CLI for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325)) -+ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270)) -* The `dockremap` user is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266)) -- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258)) -- Docker, when run as a service with systemd, will now properly manage its processes' cgroups ([#20633](https://github.com/docker/docker/pull/20633)) -* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863)) -* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388)) -* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894)) -* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490), [#19851](https://github.com/docker/docker/pull/19851)) -+ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013)) -+ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348)) - -### Networking - -- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671)) -- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671)) -* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160)) -+ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122)) -* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383)) -- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431)) -* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357)) -+ Control IPv6 via explicit option when creating a network (`docker network create --ipv6`).
This shows up as a new `EnableIPv6` field in `docker network inspect` ([#17513](https://github.com/docker/docker/pull/17513)) -* Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396)) -- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396)) -* Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019)) -- Fix endpoint count inconsistency after an ungraceful daemon restart ([#21261](https://github.com/docker/docker/pull/21261)) -- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019)) -- Fixed a bug which prevented docker reload when the host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019)) -- Added inbuilt nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019)) -- Fixed bug in iptables.Exists() logic [#21019](https://github.com/docker/docker/pull/21019) -- Fixed a Veth interface leak when using an overlay network ([#21019](https://github.com/docker/docker/pull/21019)) -- Fixed a bug which prevented docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214)) -- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419)) -- Allow passing the global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419)) -- For anonymous containers use the alias name for IP to name mapping, i.e. DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019)) -- Fix a panic when deleting an entry from the /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019)) -- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019)) -- Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780](https://github.com/docker/docker/pull/21780)) -- Fix to retain IPAM driver option configs on daemon reload ([#21914](https://github.com/docker/docker/pull/21914)) - -### Plugins - -- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686)) -- Fix an issue where the Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602)) - -### Runtime - -- Fix a panic that could occur when cleaning up after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716)) -- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692)) -- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677)) -- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in Docker 1.9 ([#21666](https://github.com/docker/docker/pull/21666)), but was decided to be too much of a backward-incompatible change, so the feature was kept.
-+ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383)) -+ `docker inspect` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370)) -+ Docker on Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354)) -* Docker learned to report the faulty exe when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345)) -* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097)) -- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089)) -- Fix a hang that would happen on attach if the initial start failed ([#21048](https://github.com/docker/docker/pull/21048)) -- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045)) -- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022)) -- Fix an issue where nanoseconds were not correctly taken into account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013)) -- Fix the handling of Docker commands when passed a 64-byte ID ([#21002](https://github.com/docker/docker/pull/21002)) -* Docker will now return a `204` (i.e. http.StatusNoContent) code when it successfully deletes a network ([#20977](https://github.com/docker/docker/pull/20977)) -- Fix a bug where the daemon would wait indefinitely in case the process it was about to kill had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967)) -* The devmapper driver learned the `dm.min_free_space` option. If the mapped device free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786)) -+ Docker can now prevent processes in containers from gaining new privileges via the `--security-opt=no-new-privileges` flag ([#20727](https://github.com/docker/docker/pull/20727)) -- Starting a container with the `--device` option will now correctly resolve symlinks ([#20684](https://github.com/docker/docker/pull/20684)) -+ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers.
([#20662](https://github.com/docker/docker/pull/20662)) -- Fix docker configuration reloading to only alter values present in the given config file ([#20604](https://github.com/docker/docker/pull/20604)) -+ Docker now allows setting a container hostname via the `--hostname` flag when `--net=host` ([#20177](https://github.com/docker/docker/pull/20177)) -+ Docker now allows executing privileged containers while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111)) -- Fix Docker not correctly cleaning up old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679)) -* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517)) -- Fix container loading, on daemon startup, when they depend on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500)) -* `docker update` learned how to change a container's restart policy ([#19116](https://github.com/docker/docker/pull/19116)) -* `docker inspect` now also returns a new `State` field containing the container state in a human-readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`) ([#18966](https://github.com/docker/docker/pull/18966)) -+ Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flag. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697)) -- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078)) -- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842)) -- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802)) -- Fix a situation where a container cannot be stopped if the terminal is closed ([#21840](https://github.com/docker/docker/pull/21840)) - -### Security - -* Objects with the `pcp_pmcd_t` SELinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370)) -* `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262)) -* `send`, `recv` and `x32` were added to the list of allowed syscalls and arch in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432)) -* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046)) -* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591)) - -### Volumes - -* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389)) -* Local volumes can now accept options similar to the Unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262)) -- Fix an issue where a one-letter directory name could not be used as the source for volumes ([#21106](https://github.com/docker/docker/pull/21106)) -+ `docker run -v` now accepts a new flag `nocopy`.
This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223)) - -## 1.10.3 (2016-03-10) - -### Runtime - -- Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706) -- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647) - -### Distribution - -- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831) -- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030) - -### Plugin system - -- Fix issue preventing volume plugins from starting when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834) -- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682) -- Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680) - -### Security - -- Fix linux32 emulation failing during docker build [#20672](https://github.com/docker/docker/pull/20672) - It was due to the `personality` syscall being blocked by the default seccomp profile. -- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981) - It was due to the `ipc` syscall being blocked by the default seccomp profile. -- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685) -- Fix issue preventing the daemon from starting if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725) - -## 1.10.2 (2016-02-22) - -### Runtime - -- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518) -- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333) -- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446) -- Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471) -- Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522) - -### Distribution - -- Keep layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513) -- Gracefully handle a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372) -- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367) -- Fix tar-split file corruption during migration that later caused docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458) - -### Networking - -- Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510) - -### Volumes - -- Fix issue with multiple volume references with the same name [#20381](https://github.com/docker/docker/pull/20381) - -### Security - -- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523) - -## 1.10.1 (2016-02-11) - -### Runtime - -* Do not stop daemon on migration hard
failure [#20156](https://github.com/docker/docker/pull/20156) -- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058) -- Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045) -- Do not leak /dev/mqueue from the host to all containers, keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133) -- Fix `docker ps --filter before=...` to not show stopped containers without providing the `-a` flag [#20135](https://github.com/docker/docker/pull/20135) - -### Security - -- Fix issue preventing docker events from working properly with the authorization plugin [#20002](https://github.com/docker/docker/pull/20002) - -### Distribution - -* Add additional verifications and prevent uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164) -- Fix regression preventing uppercase characters in the image reference hostname [#20175](https://github.com/docker/docker/pull/20175) - -### Networking - -- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060) -- Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235) -- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181) -- Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181) - -### Logging - -- Fix a bug where the tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109) - -### Volumes - -- Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983) - -### Misc - -- Remove TasksMax from the Docker systemd service [#20167](https://github.com/docker/docker/pull/20167) - -## 1.10.0 (2016-02-04) - -**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers. -A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present. -Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration -We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime. -Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/ - -### Runtime - -+ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078) -+ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587) -+ Add `--format` flag to `docker images` command [#17692](https://github.com/docker/docker/pull/17692) -+ Allow setting the daemon configuration in a file and hot-reloading it with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587) -+ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888) - This change is backward compatible in the API, but not on the CLI.
-+ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959) -+ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466) -+ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879) -+ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277) -+ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666) -+ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168) -+ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249) -+ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478) -+ Add `--cgroup-parent` flag on `daemon` to set cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062) -+ Add `-L` flag to docker cp to follow symlinks [#16613](https://github.com/docker/docker/pull/16613) -+ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908) -* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012) -* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495) -* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742) -* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704) -* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115) -* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208) -* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780) -* Permit `OPTIONS` requests against any URL, thus fixing the issue with CORS [#19569](https://github.com/docker/docker/pull/19569) -- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428) -- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326) -- Fix race condition causing autorestart to turn off on restart [#17629](https://github.com/docker/docker/pull/17629) -- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216) -- Fix an obscure bug preventing containers from starting [#19751](https://github.com/docker/docker/pull/19751) -- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722) -- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123) - -### Security - -+ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187) -+ Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989) -+ Add default seccomp profile
[#18780](https://github.com/docker/docker/pull/18780) -+ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365) -+ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887) - This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled. - Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled. -* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452) - -### Distribution - -* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) - Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. - Images no longer depend on the parent chain but contain a list of layer references. - `docker load`/`docker save` tarballs now also contain content-addressable image configurations. - For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration -* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785) -* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109), [#18353](https://github.com/docker/docker/pull/18353) -* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590) -- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743) - -### Networking - -+ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198) -+ Support for network-scoped aliases using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242) -+ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001) -+ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316) -+ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276) -+ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167) -+ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204) -+ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481) -+ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229) -+ Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775) -+ Add `--force` to `network disconnect` to force a container to be disconnected from the network
[#19317](https://github.com/docker/docker/pull/19317) -* Support for multi-host networking using built-in overlay driver for all engine supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775) -* `--link` is now supported on `docker run` for containers in user-defined networks [#19229](https://github.com/docker/docker/pull/19229) -* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489) -* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615) -* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316) -* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782) -* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906) -* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323) -- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108) -- Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106) -- Fix an issue sometimes preventing docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338) -- Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573) - -### Logging - -+ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488) -+ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998) -* Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495) -* Enhance AWS logs to auto-detect region [#16640](https://github.com/docker/docker/pull/16640) - -### Volumes - -+ Add support for setting the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034) -* Add `ls` and `inspect` endpoints to volume plugin API [#16534](https://github.com/docker/docker/pull/16534) - Existing plugins need to make use of these new APIs to satisfy users' expectations - For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549) -- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175) -- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500) -- Fix `docker volume ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671) -- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568) -- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190) - -### Builder - -+ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090) -- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055) -- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283) - -### Client - -+ Add support for overriding the API version to use via a `DOCKER_API_VERSION` environment variable [#15964](https://github.com/docker/docker/pull/15964) -- Fix a bug preventing
Windows clients from logging in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891) - -### Misc - -* systemd: Set TasksMax in addition to LimitNPROC in the systemd service file [#19391](https://github.com/docker/docker/pull/19391) - -### Deprecations - -* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700) -* Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700) -* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724) -* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799) -* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809) -* Deprecate `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350) - -## 1.9.1 (2015-11-21) - -### Runtime - -- Do not prevent daemon from booting if images could not be restored (#17695) -- Force IPC mount to unmount on daemon shutdown/init (#17539) -- Turn IPC unmount errors into warnings (#17554) -- Fix `docker stats` performance regression (#17638) -- Clarify cryptic error message upon `docker logs` if `--log-driver=none` (#17767) -- Fix rare panics (#17639, #17634, #17703) -- Fix opq whiteout problems for files with a dot prefix (#17819) -- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918) -- devicemapper: fix displayed fs in docker info (#17974) -- selinux: only relabel if the user requested it with the `z` option (#17450, #17834) -- Do not make network calls when normalizing names (#18014) - -### Client - -- Fix `docker login` on Windows (#17738) -- Fix bug with `docker inspect` output when not connected to daemon (#17715) -- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680) - -### Builder - -- Fix regression with symlink behavior in ADD/COPY (#17710) - -### Networking - -- Allow passing a network ID as an argument for `--net` (#17558) -- Fix connect to host and prevent disconnect from host for `host` network (#17476) -- Fix `--fixed-cidr` issue when gateway ip falls in ip-range and ip-range is - not the first block in the network (#17853) -- Restore deterministic `IPv6` generation from `MAC` address on default `bridge` network (#17890) -- Allow port-mapping only for endpoints created on docker run (#17858) -- Fixed an endpoint delete issue with a possible stale sbox (#18102) - -### Distribution - -- Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047) - -## 1.9.0 (2015-11-03) - -### Runtime - -+ `docker stats` now returns block IO metrics (#15005) -+ `docker stats` now details network stats per interface (#15786) -+ Add `ancestor=` filter to `docker ps --filter` flag to filter -containers based on their ancestor images (#14570) -+ Add `label=` filter to `docker ps --filter` to filter containers -based on label (#16530) -+ Add `--kernel-memory` flag to `docker run` (#14006) -+ Add `--message` flag to `docker import` allowing you to specify an optional -message (#15711) -+ Add `--privileged` flag to `docker exec` (#14113) -+ Add `--stop-signal` flag to `docker run` allowing you to replace the container -process stopping signal (#15307) -+ Add a new `unless-stopped` restart policy (#15348) -+ Inspecting
an image now returns tags (#13185) -+ Add container size information to `docker inspect` (#15796) -+ Add `RepoTags` and `RepoDigests` fields to `/images/{name:.*}/json` (#17275) -- Remove the deprecated `/container/ps` endpoint from the API (#15972) -- Send and document correct HTTP codes for `/exec/<id>/start` (#16250) -- Share shm and mqueue between containers sharing the IPC namespace (#15862) -- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) -- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted -with the `ro` option (#14965) -- Improve `rmi` performance (#16890) -- Do not update /etc/hosts for the default bridge network, except for links (#17325) -- Fix conflict with duplicate container names (#17389) -- Fix an issue with incorrect template execution in `docker inspect` (#17284) -- DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271) - -### Client - -+ Allow `docker import` to import from local files (#11907) - -### Builder - -+ Add a `STOPSIGNAL` Dockerfile instruction allowing you to set a different -stop-signal for the container process (#15307) -+ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` -that allows you to add build-time environment variables (#15182) - -### Storage - -- devicemapper: Implement deferred deletion capability (#16381) - -### Networking - -+ `docker network` exits experimental and is part of the standard release (#16645) -+ New network top-level concept, with associated subcommands and API (#16645) - WARNING: the API is different from the experimental API -+ Support for multiple isolated/micro-segmented networks (#16645) -+ Built-in multihost networking using a VXLAN-based overlay driver (#14071) -+ Support for third-party network plugins (#13424) -+ Ability to dynamically connect containers to multiple networks (#16645) -+ Support for user-defined IP address management via pluggable IPAM drivers (#16910) -+ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in node discovery (#16229) -+ Add `--cluster-store-opt` for setting up TLS settings (#16644) -+ Add `--dns-opt` to the daemon (#16031) -- DEPRECATE the following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, - `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. - Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect - the networking settings of a container per network. - -### Volumes - -+ New top-level `volume` subcommand and API (#14242) -- Move API volume driver settings to host-specific config (#15798) -- Print an error message if the volume name is not unique (#16009) -- Ensure volumes created from Dockerfiles always use the local volume driver -(#15507) -- DEPRECATE auto-creating missing host paths for bind mounts (#16349) - -### Logging - -+ Add `awslogs` logging driver for Amazon CloudWatch (#15495) -+ Add generic `tag` log option to allow customizing container/image -information passed to driver (e.g. show container names) (#15384) -- Implement the `docker logs` endpoint for the journald driver (#13707) -- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.)
(#15384) - -### Distribution - -+ `docker search` now works with partial names (#16509) -- Push optimization: avoid buffering to file (#15493) -- The daemon will display progress for images that were already being pulled -by another client (#15489) -- Only permissions required for the current action being performed are requested (#) -+ Renaming trust keys (and respective environment variables) from `offline` to -`root` and `tagging` to `repository` (#16894) -- DEPRECATE trust key environment variables -`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and -`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) - -### Security - -+ Add SELinux profiles to the rpm package (#15832) -- Fix various issues with AppArmor profiles provided in the deb package -(#14609) -- Add AppArmor policy that prevents writing to /proc (#15571) - -## 1.8.3 (2015-10-12) - -### Distribution - -- Fix layer IDs leading to local graph poisoning (CVE-2014-8178) -- Fix manifest validation and parsing logic errors that allowed a pull-by-digest validation bypass (CVE-2014-8179) -+ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry - -## 1.8.2 (2015-09-10) - -### Distribution - -- Fix a rare edge case of handling GNU LongLink and LongName entries. -- Fix ^C on docker pull. -- Fix docker pull issues on client disconnection. -- Fix issue that caused the daemon to panic when loggers weren't configured properly. -- Fix goroutine leak pulling images from registry V2. - -### Runtime - -- Fix a bug mounting cgroups for docker daemons running inside docker containers. -- Initialize log configuration properly. - -### Client - -- Handle `-q` flag in `docker ps` properly when there is a default format. - -### Networking - -- Fix several corner cases with netlink. - -### Contrib - -- Fix several issues with bash completion.
- -## 1.8.1 (2015-08-12) - -### Distribution - -* Fix a bug where pushing multiple tags would result in invalid images - -## 1.8.0 (2015-08-11) - -### Distribution - -+ Trusted pull, push and build, disabled by default -* Make tar layers deterministic between registries -* Don't allow deleting the image of running containers -* Check if a tag name to load is a valid digest -* Allow one-character repository names -* Add a more accurate error description for invalid tag names -* Make build cache ignore mtime - -### Cli - -+ Add support for DOCKER_CONFIG/--config to specify config file dir -+ Add --type flag for docker inspect command -+ Add formatting options to `docker ps` with `--format` -+ Replace `docker -d` with new subcommand `docker daemon` -* Zsh completion updates and improvements -* Add some missing events to bash completion -* Support daemon URLs with base paths in `docker -H` -* Validate status= filter to docker ps -* Display when a container is in --net=host in docker ps -* Extend docker inspect to export image metadata related to graph driver -* Restore --default-gateway{,-v6} daemon options -* Add missing unpublished ports in docker ps -* Allow duration strings in `docker events` as --since/--until -* Expose more mounts information in `docker inspect` - -### Runtime - -+ Add new Fluentd logging driver -+ Allow `docker import` to load from local files -+ Add logging driver for GELF via UDP -+ Allow copying files from the host to containers with `docker cp` -+ Promote volume drivers from experimental to master -+ Add rollover options to json-file log driver, and --log-driver-opts flag -+ Add memory swappiness tuning options -* Remove cgroup read-only flag when privileged -* Make /proc, /sys, & /dev readonly for readonly containers -* Add cgroup bind mount by default -* Overlay: Export metadata for container and image in `docker inspect` -* Devicemapper: external device activation -* Devicemapper: Compare UUID of base device on startup -* Remove RC4 from the list of registry cipher suites -* Add syslog-facility option -* LXC execdriver compatibility with recent LXC versions -* Mark LXC execdriver as deprecated (to be removed with the migration to runc) - -### Plugins - -* Separate plugin sockets and specs locations -* Allow TLS connections to plugins - -### Bug fixes - -- Add missing 'Names' field to /containers/json API output -- Make `docker rmi` of dangling images safe while pulling -- Devicemapper: Change default basesize to 100G -- Go Scheduler issue with sync.Mutex and gcc -- Fix issue where Search API endpoint would panic due to empty AuthConfig -- Set image canonical names correctly -- Check dockerinit only if lxc driver is used -- Fix ulimit usage of nproc -- Always attach STDIN if -i,--interactive is specified -- Show error messages when saving container state fails -- Fixed incorrect assumption on --bridge=none treated as disable network -- Check for invalid port specifications in host configuration -- Fix endpoint leave failure for --net=host mode -- Fix goroutine leak in the stats API if the container is not running -- Check for apparmor file before reading it -- Fix DOCKER_TLS_VERIFY being ignored -- Set umask to the default on startup -- Correct the message when pausing or unpausing a non-running container -- Adjust disallowed CpuShares in container creation -- ZFS: correctly apply selinux context -- Display an empty string instead of `<nil>` when the IP opt is nil -- `docker kill` returns an error when the container is not running -- Fix COPY/ADD quoted/json form -- Fix goroutine leak on logs -f
-
-### Runtime
-
-+ Add new Fluentd logging driver
-+ Allow `docker import` to load from local files
-+ Add logging driver for GELF via UDP
-+ Allow copying files from the host to containers with `docker cp`
-+ Promote volume drivers from experimental to master
-+ Add rollover options to the json-file log driver, and the --log-driver-opts flag
-+ Add memory swappiness tuning options
-* Remove the cgroup read-only flag when privileged
-* Make /proc, /sys, & /dev read-only for read-only containers
-* Add cgroup bind mount by default
-* Overlay: Export metadata for container and image in `docker inspect`
-* Devicemapper: external device activation
-* Devicemapper: Compare uuid of base device on startup
-* Remove RC4 from the list of registry cipher suites
-* Add syslog-facility option
-* LXC execdriver compatibility with recent LXC versions
-* Mark the LXC execdriver as deprecated (to be removed with the migration to runc)
-
-### Plugins
-
-* Separate plugin sockets and specs locations
-* Allow TLS connections to plugins
-
-### Bug fixes
-
-- Add missing 'Names' field to /containers/json API output
-- Make `docker rmi` of dangling images safe while pulling
-- Devicemapper: Change default basesize to 100G
-- Go Scheduler issue with sync.Mutex and gcc
-- Fix issue where the Search API endpoint would panic due to empty AuthConfig
-- Set image canonical names correctly
-- Check dockerinit only if the lxc driver is used
-- Fix ulimit usage of nproc
-- Always attach STDIN if -i,--interactive is specified
-- Show error messages when saving container state fails
-- Fix incorrect assumption that --bridge=none disables networking
-- Check for invalid port specifications in host configuration
-- Fix endpoint leave failure for --net=host mode
-- Fix goroutine leak in the stats API if the container is not running
-- Check for the apparmor file before reading it
-- Fix DOCKER_TLS_VERIFY being ignored
-- Set umask to the default on startup
-- Correct the message for pausing and unpausing a non-running container
-- Adjust disallowed CpuShares in container creation
-- ZFS: correctly apply selinux context
-- Display empty string instead of <nil> when the IP opt is nil
-- `docker kill` returns an error when the container is not running
-- Fix COPY/ADD quoted/json form
-- Fix goroutine leak on `logs -f` with no output
-- Remove panic in nat package on invalid hostport
-- Fix container linking in Fedora 22
-- Fix error caused by using default gateways outside of the allocated range
-- Format times in inspect command with a template as RFC3339Nano
-- Make the registry client accept 2xx and 3xx HTTP status responses as successful
-- Fix race issue that caused the daemon to crash when certain layer downloads failed in a specific order.
-- Fix error when the docker ps format was not valid.
-- Remove redundant IP forward check.
-- Fix issue trying to push images to repository mirrors.
-- Fix error cleaning up network endpoints when there is an initialization issue.
-
-## 1.7.1 (2015-07-14)
-
-#### Runtime
-
-- Fix default user spawning exec process with `docker exec`
-- Make `--bridge=none` not configure the network bridge
-- Publish networking stats properly
-- Fix implicit devicemapper selection with static binaries
-- Fix socket connections that hung intermittently
-- Fix bridge interface creation on CentOS/RHEL 6.6
-- Fix local dns lookups added to resolv.conf
-- Fix copy command mounting volumes
-- Fix read/write privileges in volumes mounted with --volumes-from
-
-#### Remote API
-
-- Fix unmarshalling of Command and Entrypoint
-- Set limit for minimum client version supported
-- Validate port specification
-- Return proper errors when attach/reattach fail
-
-#### Distribution
-
-- Fix pulling private images
-- Fix fallback between registry V2 and V1
-
-## 1.7.0 (2015-06-16)
-
-#### Runtime
-+ Experimental feature: support for out-of-process volume plugins
-* The userland proxy can be disabled in favor of hairpin NAT using the daemon's `--userland-proxy=false` flag
-* The `exec` command supports the `-u|--user` flag to specify the new process owner
-+ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags
-+ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota`
-+ Container block IO can be controlled in `docker run` using `--blkio-weight`
-+ ZFS support
-+ The `docker logs` command supports a `--since` argument
-+ UTS namespace can be shared with the host with `docker run --uts=host`
-
-#### Quality
-* Networking stack was entirely rewritten as part of the libnetwork effort
-* Engine internals refactoring
-* Volumes code was entirely rewritten to support the plugins effort
-+ Sending SIGUSR1 to a daemon will dump all goroutine stacks without exiting
-
-#### Build
-+ Support ${variable:-value} and ${variable:+value} syntax for environment variables (see the example after this list)
-+ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems`
-+ git context changes with branches and directories
-* The .dockerignore file now supports exclusion rules
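A minimal sketch of the new build-time variable syntax (the image tag and variable names are illustrative; the Dockerfile is passed on stdin):

```sh
docker build -t env-syntax-demo - <<'EOF'
FROM busybox
ENV APP_HOME /opt/app
# Expands to $APP_HOME when it is set, and to /srv/app otherwise.
WORKDIR ${APP_HOME:-/srv/app}
EOF
```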
-
-#### Distribution
-+ Client support for v2 mirroring for the official registry
-
-#### Bugfixes
-* Firewalld is now supported and will automatically be used when available
-* Mount --device recursively
-
-## 1.6.2 (2015-05-13)
-
-#### Runtime
-- Revert change prohibiting mounting into /sys
-
-## 1.6.1 (2015-05-07)
-
-#### Security
-- Fix read/write /proc paths (CVE-2015-3630)
-- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
-- Fix opening of file-descriptor 1 (CVE-2015-3627)
-- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
-- Prohibit mount of /sys
-
-#### Runtime
-- Update AppArmor policy to not allow mounts
-
-## 1.6.0 (2015-04-07)
-
-#### Builder
-+ Building images from an image ID
-+ Build containers with resource constraints, e.g. `docker build --cpu-shares=100 --memory=1024m...`
-+ `commit --change` to apply specified Dockerfile instructions while committing the image
-+ `import --change` to apply specified Dockerfile instructions while importing the image
-+ Builds no longer continue in the background when canceled with CTRL-C
-
-#### Client
-+ Windows Support
-
-#### Runtime
-+ Container and image Labels
-+ `--cgroup-parent` for specifying a parent cgroup to place the container's cgroup within
-+ Logging drivers: `json-file`, `syslog`, or `none`
-+ Pulling images by ID
-+ `--ulimit` to set the ulimit on a container
-+ `--default-ulimit` option on the daemon, which applies to all created containers (overridden by `--ulimit` on run)
-
-## 1.5.0 (2015-02-10)
-
-#### Builder
-+ The Dockerfile to use for a given `docker build` can be specified with the `-f` flag
-* Dockerfile and .dockerignore files can themselves be excluded as part of the .dockerignore file, thus preventing modifications to these files from invalidating the ADD or COPY instruction cache
-* ADD and COPY instructions accept relative paths
-* The Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier
-* Improve performance when exposing a large number of ports
-
-#### Hack
-+ Allow client-side only integration tests for Windows
-* Include docker-py integration tests against the Docker daemon as part of our test suites
-
-#### Packaging
-+ Support for the new version of the registry HTTP API
-* Speed up `docker push` for images with a majority of already existing layers
-- Fixed contacting a private registry through a proxy
-
-#### Remote API
-+ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command
-+ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command
-* The container `inspect` endpoint shows the IDs of `exec` commands running in the container
-* The container `inspect` endpoint shows the number of times Docker auto-restarted the container
-* New types of events can be streamed by the `events` endpoint: 'OOM' (container died with out of memory), 'exec_create', and 'exec_start'
-- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes
-
-#### Runtime
-+ The Docker daemon has full IPv6 support
-+ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible, for example, to debug host processes using containerized debugging tools
-+ The `docker run` command can take the `--read-only` flag to mount the container's root filesystem as read-only, which can be used in combination with volumes to force a container's processes to write only to locations that will be persisted (see the example after this list)
-+ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag
-* Major stability improvements for the devicemapper storage driver
-* Better integration with the host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted
-* Better integration with the host system: per-container iptables rules are moved to the DOCKER chain
-- Fixed containers exiting on out of memory returning an invalid exit code
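For illustration, hedged sketches of the two `docker run` flags mentioned above (the busybox image and the sizes are arbitrary choices):

```sh
# Read-only root filesystem: writes succeed only where a volume is mounted.
docker run --read-only -v /tmp busybox touch /tmp/ok

# Limit the container to 512 MB of RAM and 1 GB of RAM plus swap combined.
docker run -m 512m --memory-swap 1g busybox true
```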
-
-#### Other
-* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon
-
-## 1.4.1 (2014-12-15)
-
-#### Runtime
-- Fix issue with volumes-from and bind mounts not being honored after create
-
-## 1.4.0 (2014-12-11)
-
-#### Notable Features since 1.3.0
-+ Set key=value labels to the daemon (displayed in `docker info`), applied with the new `-label` daemon flag
-+ Add support for `ENV` in Dockerfile of the form `ENV name=value name2=value2...` (see the example after this list)
-+ New Overlayfs Storage Driver
-+ `docker info` now returns an `ID` and `Name` field
-+ Filter events by event name, container, or image
-+ `docker cp` now supports copying from container volumes
-- Fixed `docker tag`, so it honors `--force` when overriding a tag for an existing image.
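A small sketch of the 1.4 `ENV` form (the names, values and image tag are made up):

```sh
docker build -t multi-env-demo - <<'EOF'
FROM busybox
# Several variables in a single ENV instruction.
ENV name=value name2="value two"
RUN env | grep -E '^name2?='
EOF
```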
-
-## 1.3.3 (2014-12-11)
-
-#### Security
-- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
-- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
-- Validate image IDs (CVE-2014-9358)
-
-#### Runtime
-- Fix an issue when image archives are being read slowly
-
-#### Client
-- Fix a regression related to stdin redirection
-- Fix a regression with `docker cp` when the destination is the current directory
-
-## 1.3.2 (2014-11-20)
-
-#### Security
-- Fix tar breakout vulnerability
-* Extractions are now sandboxed with chroot
-- Security options are no longer committed to images
-
-#### Runtime
-- Fix deadlock in `docker ps -f exited=1`
-- Fix a bug when `--volumes-from` references a container that failed to start
-
-#### Registry
-+ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16
-* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
-- Skip the experimental registry v2 API when mirroring is enabled
-
-## 1.3.1 (2014-10-28)
-
-#### Security
-* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry
-+ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified
-
-#### Runtime
-- Fix issue where volumes would not be shared
-
-#### Client
-- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false`
-- Fix docker run output to non-TTY stdout
-
-#### Builder
-- Fix escaping `$` for environment variables
-- Fix issue with lowercase `onbuild` Dockerfile instruction
-- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`
-
-## 1.3.0 (2014-10-14)
-
-#### Notable features since 1.2.0
-+ Docker `exec` allows you to run additional processes inside existing containers
-+ Docker `create` gives you the ability to create a container via the CLI without executing a process
-+ `--security-opts` options to allow users to customize container labels and apparmor profiles
-+ Docker `ps` filters
-- Wildcard support for COPY/ADD
-+ Move production URLs to get.docker.com from get.docker.io
-+ Allocate IP address on the bridge inside a valid CIDR
-+ Use drone.io for PR and CI testing
-+ Ability to set up an official registry mirror
-+ Ability to save multiple images with docker `save`
-
-## 1.2.0 (2014-08-20)
-
-#### Runtime
-+ Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime
-+ Auto-restart containers using policies
-+ Use /var/lib/docker/tmp for large temporary files
-+ `--cap-add` and `--cap-drop` to tweak which Linux capabilities you want (see the example after this list)
-+ `--device` to use devices in containers
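A hedged sketch of the capability and device flags (the image names are placeholders, not real images):

```sh
# Drop every capability, then add back the single one the workload needs.
docker run --cap-drop ALL --cap-add NET_BIND_SERVICE -p 80:80 my-web-image

# Pass one host device through instead of running --privileged.
docker run --device /dev/fuse my-fuse-image
```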
-
-#### Client
-+ `docker search` on private registries
-+ Add `exited` filter to `docker ps --filter`
-* `docker rm -f` now kills instead of stopping
-+ Support for IPv6 addresses in the `--dns` flag
-
-#### Proxy
-+ Proxy instances in separate processes
-* Small bug fix on UDP proxy
-
-## 1.1.2 (2014-07-23)
-
-#### Runtime
-+ Fix port allocation for existing containers
-+ Fix containers restart on daemon restart
-
-#### Packaging
-+ Fix /etc/init.d/docker issue on Debian
-
-## 1.1.1 (2014-07-09)
-
-#### Builder
-* Fix issue with ADD
-
-## 1.1.0 (2014-07-03)
-
-#### Notable features since 1.0.1
-+ Add `.dockerignore` support
-+ Pause containers during `docker commit`
-+ Add `--tail` to `docker logs`
-
-#### Builder
-+ Allow a tar file as context for `docker build`
-* Fix issue with whitespace and multi-line instructions in `Dockerfiles`
-
-#### Runtime
-* Overall performance improvements
-* Allow `/` as source of `docker run -v`
-* Fix port allocation
-* Fix bug in `docker save`
-* Add links information to `docker inspect`
-
-#### Client
-* Improve command line parsing for `docker commit`
-
-#### Remote API
-* Improve status code for the `start` and `stop` endpoints
-
-## 1.0.1 (2014-06-19)
-
-#### Notable features since 1.0.0
-* Enhance security for the LXC driver
-
-#### Builder
-* Fix `ONBUILD` instruction passed to grandchildren
-
-#### Runtime
-* Fix events subscription
-* Fix /etc/hostname file with host networking
-* Allow `-h` and `--net=none`
-* Fix issue with hotplug devices in `--privileged`
-
-#### Client
-* Fix artifacts with events
-* Fix a panic with empty flags
-* Fix `docker cp` on Mac OS X
-
-#### Miscellaneous
-* Fix compilation on Mac OS X
-* Fix several races
-
-## 1.0.0 (2014-06-09)
-
-#### Notable features since 0.12.0
-* Production support
-
-## 0.12.0 (2014-06-05)
-
-#### Notable features since 0.11.0
-* 40+ various improvements to stability, performance and usability
-* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting it, even if the file is a tar file
-* Inherit file permissions from the host on `ADD`
-* New `pause` and `unpause` commands to allow pausing and unpausing of containers using the cgroup freezer
-* The `images` command has a `-f`/`--filter` option to filter the list of images
-* Add `--force-rm` to clean up after a failed build
-* Standardize JSON keys in Remote API to CamelCase
-* Pulls from a docker run now assume the `latest` tag if not specified
-* Enhance security on Linux capabilities and device nodes
-
-## 0.11.1 (2014-05-07)
-
-#### Registry
-- Fix push and pull to private registry
-
-## 0.11.0 (2014-05-07)
-
-#### Notable features since 0.10.0
-
-* SELinux support for mount and process labels
-* Linked containers can be accessed by hostname
-* Use the new `--net` flag to allow advanced network configuration, such as host networking, so that containers can use the host's network interfaces
-* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon
-* Logs can now be returned with an optional timestamp
-* Docker now works with registries that support SHA-512
-* Multiple registry endpoints are supported to allow registry mirrors
-
-## 0.10.0 (2014-04-08)
-
-#### Builder
-- Fix printing multiple messages on a single line. Fixes broken output during builds.
-- Follow symlinks inside container's root for ADD build instructions.
-- Fix EXPOSE caching.
-
-#### Documentation
-- Add the new options of `docker ps` to the documentation.
-- Add the options of `docker restart` to the documentation.
-- Update daemon docs and help messages for --iptables and --ip-forward.
-- Updated apt-cacher-ng docs example.
-- Remove duplicate description of --mtu from docs.
-- Add missing -t and -v for `docker images` to the docs.
-- Add fixes to the cli docs.
-- Update libcontainer docs.
-- Update images in docs to remove references to AUFS and LXC.
-- Update the nodejs_web_app in the docs to use the new epel RPM address.
-- Fix external link on security of containers.
-- Update remote API docs.
-- Add image size to history docs.
-- Be explicit about binding to all interfaces in the redis example.
-- Document the DisableNetwork flag in the 1.10 remote api.
-- Document that `--lxc-conf` is lxc only.
-- Add chef usage documentation.
-- Add an example for an image with multiple tags for `docker load`.
-- Explain what `docker run -a` does in the docs.
-
-#### Contrib
-- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
-- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
-- Remove the inotifywait hack from the upstart host-integration example because it's no longer necessary.
-- Add check-config script to contrib.
-- Fix fish shell completion.
-
-#### Hack
-* Clean up "go test" output from "make test" to be much more readable/scannable.
-* Exclude more "definitely not unit tested Go source code" directories from hack/make/test.
-+ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
-- Include contributed completions in Ubuntu PPA.
-+ Add cli integration tests.
-* Add tweaks to the hack scripts to make them simpler.
-
-#### Remote API
-+ Add TLS auth support for API.
-* Move git clone from daemon to client.
-- Fix content-type detection in docker cp.
-* Split API into 2 go packages.
-
-#### Runtime
-* Support hairpin NAT without going through the Docker server.
-- devicemapper: succeed immediately when removing non-existent devices.
-- devicemapper: improve handling of devicemapper devices (add per-device lock, increase sleep time and unlock while sleeping).
-- devicemapper: increase timeout in waitClose to 10 seconds.
-- devicemapper: ensure we shut down the thin pool cleanly.
-- devicemapper: pass info, rather than hash, to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice.
-- devicemapper: avoid AB-BA deadlock.
-- devicemapper: make shutdown better/faster.
-- Improve alpha sorting in mflag.
-- Remove manual http cookie management because the cookiejar is being used.
-- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
-- Add FreeBSD support for the client.
-- Merge auth package into registry.
-- Add deprecation warning for -t on `docker pull`.
-- Remove goroutine leak on error.
-- Update parseLxcInfo to comply with new lxc1.0 format.
-- Fix attach exit on darwin.
-- Improve deprecation message.
-- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
-- Only unshare the mount namespace for execin.
-- Merge existing config when committing.
-- Disable daemon startup timeout.
-- Fix issue #4681: add loopback interface when networking is disabled.
-- Add failing test case for issue #4681.
-- Send SIGTERM to child, instead of SIGKILL.
-- Show the driver and the kernel version in `docker info` even when not in debug mode.
-- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
-- Fix issue caused by the absence of /etc/apparmor.d.
-- Don't leave empty cidFile behind when failing to create the container.
-- Mount cgroups automatically if they're not mounted already.
-- Use mock for search tests.
-- Update to double-dash everywhere.
-- Move .dockerenv parsing to the lxc driver.
-- Move all bind mounts in the container inside the namespace.
-- Don't use a separate bind mount for the container.
-- Always symlink /dev/ptmx for libcontainer.
-- Don't kill by pid for other drivers.
-- Add initial logging to libcontainer.
-* Sort by port in `docker ps`.
-- Move networking drivers into the runtime top-level package.
-+ Add --no-prune to `docker rmi`.
-+ Add time since exit in `docker ps`.
-- graphdriver: add build tags.
-- Prevent allocation of previously allocated ports & improve port allocation.
-* Add support for --since/--before in `docker ps`.
-- Clean up container stop.
-+ Add support for configurable dns search domains.
-- Add support for relative WORKDIR instructions.
-- Add --output flag for docker save.
-- Remove duplication of DNS entries in config merging.
-- Add cpuset.cpus to cgroups and native driver options.
-- Remove docker-ci.
-- Promote btrfs. btrfs is no longer considered experimental.
-- Add --input flag to `docker load`.
-- Return error when an existing bridge doesn't match the IP address.
-- Strip comments before parsing line continuations to avoid interpreting instructions as comments.
-- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces.
-- Add systemd implementation of cgroups and make containers show up as systemd units.
-- Fix commit and import when no repository is specified.
-- Remount /var/lib/docker as --private to fix scaling issue.
-- Use the environment's proxy when pinging the remote registry.
-- Reduce error level from harmless errors.
-* Allow --volumes-from to be individual files.
-- Fix expanding buffer in StdCopy.
-- Set error regardless of attach or stdin. This fixes #3364.
-- Add support for --env-file to load environment variables from files (see the example after this list).
-- Symlink /etc/mtab and /proc/mounts.
-- Allow pushing a single tag.
-- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM.
-- Don't throw an error when starting an already running container.
-- Fix dynamic port allocation limit.
-- Remove setupDev from libcontainer.
-- Add API version to `docker version`.
-- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup.
-- Fix --volumes-from mount failure.
-- Allow non-privileged containers to create device nodes.
-- Skip login tests because of external dependency on a hosted service.
-- Deprecate `docker images --tree` and `docker images --viz`.
-- Deprecate `docker insert`.
-- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04.
-- Add specific error message when hitting 401 over HTTP on push.
-- Fix absolute volume check.
-- Remove volumes-from from the config.
-- Move DNS options to hostconfig.
-- Update the apparmor profile for libcontainer.
-- Add deprecation notice for `docker commit -run`.
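A minimal sketch of `--env-file` (the file name and variables are illustrative):

```sh
# One VAR=value per line; lines starting with # are treated as comments.
cat > app.env <<'EOF'
DB_HOST=db.internal
DB_PORT=5432
EOF
docker run --env-file app.env busybox env
```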
-
-## 0.9.1 (2014-03-24)
-
-#### Builder
-- Fix printing multiple messages on a single line. Fixes broken output during builds.
-
-#### Documentation
-- Fix external link on security of containers.
-
-#### Contrib
-- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
-- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
-
-#### Hack
-- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
-
-#### Remote API
-- Fix content-type detection in `docker cp`.
-
-#### Runtime
-- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
-- Only unshare the mount namespace for execin.
-- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
-- Merge existing config when committing.
-- Fix panic in monitor.
-- Disable daemon startup timeout.
-- Fix issue #4681: add loopback interface when networking is disabled.
-- Add failing test case for issue #4681.
-- Send SIGTERM to child, instead of SIGKILL.
-- Show the driver and the kernel version in `docker info` even when not in debug mode.
-- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
-- Fix issue caused by the absence of /etc/apparmor.d.
-- Don't leave empty cidFile behind when failing to create the container.
-- Improve deprecation message.
-- Fix attach exit on darwin.
-- devicemapper: improve handling of devicemapper devices (add per-device lock, increase sleep time, unlock while sleeping).
-- devicemapper: succeed immediately when removing non-existent devices.
-- devicemapper: increase timeout in waitClose to 10 seconds.
-- Remove goroutine leak on error.
-- Update parseLxcInfo to comply with new lxc1.0 format.
-
-## 0.9.0 (2014-03-10)
-
-#### Builder
-- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build.
-- Add error to docker build --rm. This adds missing error handling.
-- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers.
-- Make `--rm` the default for `docker build`.
-
-#### Documentation
-- Download the docker client binary for Mac over https.
-- Update the titles of the install instructions & descriptions.
-* Add instructions for upgrading boot2docker.
-* Add port forwarding example in OS X install docs.
-- Attempt to disentangle repository and registry.
-- Update docs to explain more about `docker ps`.
-- Update sshd example to use a Dockerfile.
-- Rework some examples, including the Python examples.
-- Update docs to include instructions for a container's lifecycle.
-- Update docs documentation to discuss the docs branch.
-- Don't skip cert check for an example & use HTTPS.
-- Bring back the memory and swap accounting section which was lost when the kernel page was removed.
-- Explain DNS warnings and how to fix them on systems running and using a local nameserver.
-
-#### Contrib
-- Add Tanglu support for mkimage-debootstrap.
-- Add SteamOS support for mkimage-debootstrap.
-
-#### Hack
-- Get package coverage when running integration tests.
-- Remove the Vagrantfile. This is being replaced with boot2docker.
-- Fix tests on systems where aufs isn't available.
-- Update packaging instructions and remove the dependency on lxc.
-
-#### Remote API
-* Move code specific to the API to the api package.
-- Fix header content type for the API. Makes all endpoints use proper content type.
-- Fix registry auth & remove ping calls from CmdPush and CmdPull.
-- Add newlines to the JSON stream functions.
-
-#### Runtime
-* Do not ping the registry from the CLI. All requests to registries flow through the daemon.
-- Check for nil information return in the lxc driver. This fixes panics with older lxc versions.
-- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently.
-- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device.
-* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks.
-- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal.
-- Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`.
-- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp.
-- Fix `--run` in `docker commit`. This makes `docker commit --run` work again.
-- Fix custom bridge related options. This makes custom bridges work again.
-+ Mount-bind the PTY as container console. This allows tmux/screen to run.
-+ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel.
-+ Add native exec driver which uses libcontainer and make it the default exec driver.
-- Add support for handling extended attributes in archives.
-* Set the container MTU to be the same as the host MTU.
-+ Add simple sha256 checksums for layers to speed up `docker push`.
-* Improve kernel version parsing.
-* Allow flag grouping (`docker run -it`).
-- Remove chroot exec driver.
-- Fix divide by zero to fix panic.
-- Rewrite `docker rmi`.
-- Fix docker info with lxc 1.0.0.
-- Fix fedora tty with apparmor.
-* Don't always append env vars, replace defaults with vars from config.
-* Fix a goroutine leak.
-* Switch to Go 1.2.1.
-- Fix unique constraint error checks.
-* Handle symlinks for Docker's data directory and for TMPDIR.
-- Add deprecation warnings for flags (-flag is deprecated in favor of --flag)
-- Add apparmor profile for the native execution driver.
-* Move system specific code from archive to pkg/system.
-- Fix duplicate signal for `docker run -i -t` (issue #3336).
-- Return correct process pid for lxc.
-- Add a -G option to specify the group which unix sockets belong to.
-+ Add `-f` flag to `docker rm` to force removal of running containers.
-+ Kill ghost containers and restart all ghost containers when the docker daemon restarts.
-+ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk.
-
-## 0.8.1 (2014-02-18)
-
-#### Builder
-
-- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
-- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system
-- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported
-
-#### Documentation
-
-* Update issue filing instructions
-* Warn against the use of symlinks for Docker's storage folder
-* Replace the Firefox example with an IceWeasel example
-* Rewrite the PostgreSQL example using a Dockerfile and add more details to it
-* Improve the OS X documentation
-
-#### Remote API
-
-- Fix broken images API for version less than 1.7
-- Use the right encoding for all API endpoints which return JSON
-- Move remote api client to api/
-- Queue calls to the API using generic socket wait
-
-#### Runtime
-
-- Fix the use of custom settings for bridges and custom bridges
-- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures
-- Remove two panics which could make Docker crash in some situations
-- Don't ping registry from the CLI client
-- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks
-- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration
-- Remove directory when removing devicemapper device. This cleans up leftover mount directories
-- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration
-- Ensure `docker cp` stream is closed properly
-- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
-- Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port
-+ Mount-bind the PTY as container console. This allows tmux and screen to run in a container
-- Clean up archive closing. This fixes and improves archive handling
-- Fix engine tests on systems where temp directories are symlinked
-- Add test methods for save and load
-- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart
-- Support submodules when building from a GitHub repository
-- Quote volume path to allow spaces
-- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs
-
-## 0.8.0 (2014-02-04)
-
-#### Notable features since 0.7.0
-
-* Images and containers can be removed much faster
-* Building an image from source with docker build is now much faster
-* The Docker daemon starts and stops much faster
-* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations
-* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations
-* All packaging operations are now built on the Go language's standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar
-* Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers
-* With the ongoing changes to the networking and execution subsystems of docker, testing these areas has been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages
-* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change
-
-* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn't changed
-* The new ONBUILD instruction adds to your image a "trigger" instruction to be executed at a later time, when the image is used as the base for another build
-* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write
-* Docker is officially supported on Mac OS X
-* The Docker daemon supports systemd socket activation
-
-## 0.7.6 (2014-01-14)
-
-#### Builder
-
-* Do not follow symlinks outside of the build context
-
-#### Runtime
-
-- Remount bind mounts when ro is specified
-* Use https for fetching docker version
-
-#### Other
-
-* Inline the test.docker.io fingerprint
-* Add ca-certificates to packaging documentation
-
-## 0.7.5 (2014-01-09)
-
-#### Builder
-
-* Disable compression for build. More space usage but a much faster upload
-- Fix ADD caching for certain paths
-- Do not compress archive from git build
-
-#### Documentation
-
-- Fix error in GROUP add example
-* Make sure the GPG fingerprint is inline in the documentation
-* Give more specific advice on setting up signing of commits for DCO
-
-#### Runtime
-
-- Fix misspelled container names
-- Do not add hostname when networking is disabled
-* Return most recent image from the cache by date
-- Return all errors from docker wait
-* Add Content-Type Header "application/json" to GET /version and /info responses
-
-#### Other
-
-* Update DCO to version 1.1
-+ Update Makefile to use "docker:GIT_BRANCH" as the generated image name
-* Update Travis to check for new 1.1 DCO version
-
-## 0.7.4 (2014-01-07)
-
-#### Builder
-
-- Fix ADD caching issue with . prefixed path
-- Fix docker build on devicemapper by reverting sparse file tar option
-- Fix issue with file caching and prevent wrong cache hits
-* Use same error handling while unmarshalling CMD and ENTRYPOINT
-
-#### Documentation
-
-* Simplify and streamline Amazon Quickstart
-* Install instructions use unprefixed Fedora image
-* Update instructions for mtu flag for Docker on GCE
-+ Add Ubuntu Saucy to installation
-- Fix for wrong version warning on master instead of latest
-
-#### Runtime
-
-- Only get the image's rootfs when we need to calculate the image size
-- Correctly handle unmapping UDP ports
-* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build
-- Fix login message to say pull instead of push
-- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN
-* Make blank -H option default to the same as no -H was sent
-* Extract cgroups utilities to own submodule
-
-#### Other
-
-+ Add Travis CI configuration to validate DCO and gofmt requirements
-+ Add Developer Certificate of Origin text
-* Upgrade VBox Guest Additions
-* Check standalone header when pinging a registry server
-
-## 0.7.3 (2014-01-02)
-
-#### Builder
-
-+ Update ADD to use the image cache, based on a hash of the added content
-* Add error message for empty Dockerfile
-
-#### Documentation
-
-- Fix outdated link to the "Introduction" on www.docker.io
-+ Update the docs to get wider when the screen does
-- Add information about needing to install LXC when using raw binaries
-* Update Fedora documentation to disentangle the docker and docker.io conflict
-* Add a note about using the new `-mtu` flag in several GCE zones
-+ Add FrugalWare installation instructions
-+ Add a more complete example of `docker run`
-- Fix API documentation for creating and starting privileged containers
-- Add missing "name" parameter documentation on "/containers/create"
-* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration
-- Update the 1.8 API documentation with some additions that were added to the docs for 1.7
-
-#### Hack
-
-- Add missing libdevmapper dependency to the packagers documentation
-* Update minimum Go requirement to a hard line at Go 1.2+
-* Many minor improvements to the Vagrantfile
-+ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location)
-+ Add coverprofile generation reporting
-- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually
-* Update Dockerfile to be more canonical and have fewer spurious warnings during build
-- Fix some miscellaneous `docker pull` progress bar display issues
-* Migrate more miscellaneous packages under the "pkg" folder
-* Update TextMate highlighting to automatically be enabled for files named "Dockerfile"
-* Reorganize syntax highlighting files under a common "contrib/syntax" directory
-* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation
-* Add support for container names in bash completion
-
-#### Packaging
-
-+ Add an official Docker client binary for Darwin (Mac OS X)
-* Remove empty "Vendor" string and add "License" on deb package
-+ Add a stubbed version of "/etc/default/docker" in the deb package
-
-#### Runtime
-
-* Update layer application to extract tars in place, avoiding file churn while handling whiteouts
-- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision)
-* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`)
-+ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions
-- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files
-* Update container name validation to include '.'
-- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected
-* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler
-* Update to use proper box-drawing characters everywhere in `docker images -tree`
-* Move MTU setting from LXC configuration to directly use netlink
-* Add `-S` option to external tar invocation for more efficient sparse file handling
-+ Add arch/os info to User-Agent string, especially for registry requests
-+ Add `-mtu` option to Docker daemon for configuring MTU
-- Fix `docker build` to exit with a non-zero exit code on error
-+ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation
-
-## 0.7.2 (2013-12-16)
-
-#### Runtime
-
-+ Validate container names on creation with standard regex
-* Increase maximum image depth to 127 from 42
-* Continue to move api endpoints to the job api
-+ Add -bip flag to allow specification of dynamic bridge IP via CIDR
-- Allow bridge creation when ipv6 is not enabled on certain systems
-* Set hostname and IP address from within dockerinit
-* Drop capabilities from within dockerinit
-- Fix volumes on host when a symlink is present in the image
-- Prevent deletion of an image if ANY container depends on it, even if the container is not running
-* Update docker push to use new progress display
-* Use os.Lstat to allow mounting unix sockets when inspecting volumes
-- Adjust handling of inactive user login
-- Add missing defines in devicemapper for older kernels
-- Allow untag operations with no container validation
-- Add auth config to docker build
-
-#### Documentation
-
-* Add more information about Docker logging
-+ Add RHEL documentation
-* Add a direct example for changing the CMD that is run in a container
-* Update Arch installation documentation
-+ Add section on Trusted Builds
-+ Add Network documentation page
-
-#### Other
-
-+ Add new cover bundle for providing code coverage reporting
-* Separate integration tests in bundles
-* Make Tianon the hack maintainer
-* Update mkimage-debootstrap with more tweaks for keeping images small
-* Use https to get the install script
-* Remove vendored dotcloud/tar now that Go 1.2 has been released
-
-## 0.7.1 (2013-12-05)
-
-#### Documentation
-
-+ Add @SvenDowideit as documentation maintainer
-+ Add links example
-+ Add documentation regarding ambassador pattern
-+ Add Google Cloud Platform docs
-+ Add dockerfile best practices
-* Update doc for RHEL
-* Update doc for registry
-* Update Postgres examples
-* Update doc for Ubuntu install
-* Improve remote api doc
-
-#### Runtime
-
-+ Add hostconfig to docker inspect
-+ Implement `docker logs -f` to stream logs
-+ Add env variable to disable kernel version warning
-+ Add -format to `docker inspect`
-+ Support bind-mount for files
-- Fix bridge creation on RHEL
-- Fix image size calculation
-- Make sure iptables are called even if the bridge already exists
-- Fix issue with stderr-only attach
-- Remove init layer when destroying a container
-- Fix same port binding on different interfaces
-- `docker build` now returns the correct exit code
-- Fix `docker port` to display correct port
-- `docker build` now checks that the Dockerfile exists client-side
-- `docker attach` now returns the correct exit code
-- Remove the name entry when the container does not exist
-
-#### Registry
-
-* Improve progress bars, add ETA for downloads
-* Simultaneous pulls now wait for the first to finish instead of failing
-- Tag only the top-layer image when pushing to registry
-- Fix issue with offline image transfer
-- Fix issue preventing the use of ':' in passwords for registry
-
-#### Other
-
-+ Add pprof handler for debug
-+ Create a Makefile
-* Use stdlib tar that now includes fix
-* Improve make.sh test script
-* Handle SIGQUIT on the daemon
-* Disable verbose during tests
-* Upgrade to go1.2 for official build
-* Improve unit tests
-* The test suite now runs all tests even if one fails
-* Refactor C in Go (Devmapper)
-- Fix OS X compilation
-
-## 0.7.0 (2013-11-25)
-
-#### Notable features since 0.6.0
-
-* Storage drivers: choose from aufs, device-mapper, or vfs.
-* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
-* Links: compose complex software stacks by connecting containers to each other.
-* Container naming: organize your containers by giving them memorable names.
-* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
-* Offline transfer: push and pull images to the filesystem without losing information.
-* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage.
-
-## 0.6.7 (2013-11-21)
-
-#### Runtime
-
-* Improve stability, fix some race conditions
-* Skip mounted volumes when deleting a container's volumes
-* Fix layer size computation: handle hard links correctly
-* Use the work path for docker cp CONTAINER:PATH
-* Fix tmp dir never being cleaned up
-* Speed up docker ps
-* More informative error message on name collisions
-* Fix nameserver regex
-* Always return long IDs
-* Fix container restart race condition
-* Keep published ports on docker stop; docker start
-* Fix container networking on Fedora
-* Correctly express "any address" to iptables
-* Fix network setup when reconnecting to ghost container
-* Prevent deletion if image is used by a running container
-* Lock around read operations in graph
-
-#### RemoteAPI
-
-* Return full ID on docker rmi
-
-#### Client
-
-+ Add -tree option to images
-+ Offline image transfer
-* Exit with status 2 on usage error and display usage on stderr
-* Do not forward SIGCHLD to container
-* Use string timestamp for docker events -since
-
-#### Other
-
-* Update to go 1.2rc5
-+ Add /etc/default/docker support to upstart
-
-## 0.6.6 (2013-11-06)
-
-#### Runtime
-
-* Ensure container name on register
-* Fix regression in /etc/hosts
-+ Add lock around write operations in graph
-* Check if port is valid
-* Fix restart runtime error with ghost container networking
-+ Add some more colors and animals to increase the pool of generated names
-* Fix issues in docker inspect
-+ Escape apparmor confinement
-+ Set environment variables using a file
-* Prevent docker insert from erasing data
-+ Prevent DNS server conflicts in CreateBridgeIface
-+ Validate bind mounts on the server side
-+ Use parent image config in docker build
-
-#### Client
-
-+ Add -P flag to publish all exposed ports
-+ Add -notrunc and -q flags to docker history
-* Fix docker commit, tag and import usage
-+ Add stars, trusted builds and library flags in docker search
-* Fix docker logs with tty
-
-#### RemoteAPI
-
-* Make /events API send headers immediately
-* Do not split last column in docker top
-+ Add size to history
-
-#### Other
-
-+ Contrib: Desktop integration. Firefox use case.
-+ Dockerfile: bump to go1.2rc3
-
-## 0.6.5 (2013-10-29)
-
-#### Runtime
-
-+ Containers can now be named
-+ Containers can now be linked together for service discovery
-+ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors
-+ Automatically start crashed containers after a reboot
-+ Expose IP, port, and proto as separate environment vars for container links
-* Allow ports to be published to specific IPs
-* Prohibit inter-container communication by default
-- Ignore ErrClosedPipe for stdin in Container.Attach
-- Remove unused field kernelVersion
-* Fix issue when mounting subdirectories of /mnt in container
-- Fix untag during removal of images
-* Check return value of syscall.Chdir when changing working directory inside dockerinit
-
-#### Client
-
-- Only pass stdin to hijack when needed to avoid closed pipe errors
-* Use less reflection in command-line method invocation
-- Monitor the tty size after starting the container, not prior
-- Remove useless os.Exit() calls after log.Fatal
-
-#### Hack
-
-+ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian
-* Add -p option to invoke debootstrap with http_proxy
-- Update install.sh with $sh_c to get sudo/su for modprobe
-* Update all the mkimage scripts to use --numeric-owner as a tar argument
-* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues
-
-#### Other
-
-* Documentation: Fix the flags for nc in example
-* Testing: Remove warnings and prevent mount issues
-- Testing: Change logic for tty resize to avoid warning in tests
-- Builder: Fix race condition in docker build with verbose output
-- Registry: Fix content-type for PushImageJSONIndex method
-* Contrib: Improve helper tools to generate Debian and Arch Linux server images
-
-## 0.6.4 (2013-10-16)
-
-#### Runtime
-
-- Add cleanup of container when Start() fails
-* Add better comments to utils/stdcopy.go
-* Add utils.Errorf for error logging
-+ Add -rm to docker run for removing a container on exit
-- Remove error messages which are not actually errors
-- Fix `docker rm` with volumes
-- Fix some error cases where an HTTP body might not be closed
-- Fix panic with wrong dockercfg file
-- Fix the attach behavior with -i
-* Record termination time in state
-- Use empty string so TempDir uses the OS's temp dir automatically
-- Make sure to close the network allocators
-+ Autorestart containers by default
-* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)`
-* lxc: Allow set_file_cap capability in container
-- Move run -rm to the cli only
-* Split stdout and stderr
-* Always create a new session for the container
-
-#### Testing
-
-- Add aggregated docker-ci email report
-- Add cleanup to remove leftover containers
-* Add nightly release to docker-ci
-* Add more tests around auth.ResolveAuthConfig
-- Remove a few errors in tests
-- Catch errClosing error when TCP and UDP proxies are terminated
-* Only run certain tests with TESTFLAGS='-run TestName' make.sh
-* Prevent docker-ci from testing closing PRs
-* Replace panic with log.Fatal in tests
-- Increase TestRunDetach timeout
-
-#### Documentation
-
-* Add initial draft of the Docker infrastructure doc
-* Add devenvironment link to CONTRIBUTING.md
-* Add `apt-get install curl` to Ubuntu docs
-* Add explanation for export restrictions
-* Add .dockercfg doc
-* Remove Gentoo install notes about #1422 workaround
-* Fix help text for -v option
-* Fix Ping endpoint documentation
-- Fix parameter names in docs for ADD command
-- Fix ironic typo in changelog
-* Various command fixes in postgres example
-* Document how to edit and release docs
-- Minor updates to `postgresql_service.rst`
-* Clarify LGTM process to contributors
-- Correct error in the package name
-* Document what `vagrant up` is actually doing
-+ Improve doc search results
-* Clean up whitespace in API 1.5 docs
-* Use angle brackets in MAINTAINER example email
-* Update archlinux.rst
-+ Changes to a new style for the docs. Includes version switcher.
-* Formatting, add information about multiline json
-* Improve registry and index REST API documentation
-- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3
-* Update Gentoo installation documentation now that we're in the portage tree proper
-* Clean up and reorganize docs and tooling for contributors and maintainers
-- Minor spelling correction of protocoll -> protocol
-
-#### Contrib
-
-* Add vim syntax highlighting for Dockerfiles from @honza
-* Add mkimage-arch.sh
-* Reorganize contributed completion scripts to add zsh completion
-
-#### Hack
-
-* Add vagrant user to the docker group
-* Add proper bash completion for "docker push"
-* Add xz utils as a runtime dep
-* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates
-+ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link
-* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly
-+ Add @tianon to hack/MAINTAINERS
-* Improve network performance for VirtualBox
-* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
-- Fix contrib/mkimage-debian.sh apt caching prevention
-+ Add Dockerfile.tmLanguage to contrib
-* Configure FPM to make /etc/init/docker.conf a config file
-* Enable SSH agent forwarding in Vagrant VM
-* Several small tweaks/fixes for contrib/mkimage-debian.sh
-
-#### Other
-
-- Builder: Abort build if mergeConfig returns an error and fix duplicate error message
-- Packaging: Remove deprecated packaging directory
-- Registry: Use correct auth config when logging in.
-- Registry: Fix the error message so it is the same as the regex
-
-## 0.6.3 (2013-09-23)
-
-#### Packaging
-
-* Add 'docker' group on install for ubuntu package
-* Update tar vendor dependency
-* Download apt key over HTTPS
-
-#### Runtime
-
-- Only copy and change permissions on non-bindmount volumes
-* Allow multiple volumes-from
-- Fix HTTP imports from STDIN
-
-#### Documentation
-
-* Update section on extracting the docker binary after build
-* Update development environment docs for new build process
-* Remove 'base' image from documentation
-
-#### Other
-
-- Client: Fix detach issue
-- Registry: Update regular expression to match index
-
-## 0.6.2 (2013-09-17)
-
-#### Runtime
-
-+ Add domainname support
-+ Implement image filtering with path.Match
-* Remove unnecessary warnings
-* Remove os/user dependency
-* Only mount the hostname file when the config exists
-* Handle signals within the `docker login` command
-- UID and GID are now also applied to volumes
-- `docker start` sets error code upon error
-- `docker run` sets the same error code as the process started
-
-#### Builder
-
-+ Add -rm option in order to remove intermediate containers
-* Allow multiline for the RUN instruction
-
-#### Registry
-
-* Implement login with private registry
-- Fix push issues
-
-#### Other
-
-+ Hack: Vendor all dependencies
-* Remote API: Bump to v1.5
-* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
-* Documentation: General improvements
-
-## 0.6.1 (2013-08-23)
-
-#### Registry
-
-* Pass "meta" headers in API calls to the registry
-
-#### Packaging
-
-- Use correct upstart script with new build tool
-- Use libffi-dev, don't build it from sources
-- Remove duplicate mercurial install command
-
-## 0.6.0 (2013-08-22)
-
-#### Runtime
-
-+ Add lxc-conf flag to allow custom lxc options
-+ Add an option to set the working directory
-* Add Image name to LogEvent tests
-+ Add -privileged flag and relevant tests, docs, and examples
-* Add websocket support to /containers/<id>/attach/ws
-* Add warning when net.ipv4.ip_forwarding = 0
-* Add hostname to environment
-* Add last stable version in `docker version`
-- Fix race conditions in parallel pull
-- Fix Graph ByParent() to generate list of child images per parent image.
-- Fix typo: fmt.Sprint -> fmt.Sprintf
-- Fix small \n error in docker build
-* Fix to "Inject dockerinit at /.dockerinit"
-* Fix #910: print user name to docker info output
-* Use Go 1.1.2 for dockerbuilder
-* Use ranged for loop on channels
-- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
-- Improve CMD, ENTRYPOINT, and attach docs.
-- Improve connect message with socket error
-- Load authConfig only when needed and fix useless WARNING
-- Show tag used when image is missing
-* Apply volumes-from before creating volumes
-- Make docker run handle SIGINT/SIGTERM
-- Prevent crash when .dockercfg not readable
-- Install script should be fetched over https, not http.
-* API, issue 1471: Use groups for socket permissions
-- Correctly detect IPv4 forwarding
-* Mount /dev/shm as a tmpfs
-- Switch from http to https for get.docker.io
-* Let userland proxy handle container-bound traffic
-* Update the Docker CLI to specify a value for the "Host" header.
-- Change network range to avoid conflict with EC2 DNS
-- Reduce connect and read timeout when pinging the registry
-* Parallel pull
-- Handle ip route showing mask-less IP addresses
-* Allow ENTRYPOINT without CMD
-- Always consider localhost as a domain name when parsing the FQN repo name
-* Refactor checksum
-
-#### Documentation
-
-* Add MongoDB image example
-* Add instructions for creating and using the docker group
-* Add sudo to examples and installation to documentation
-* Add ufw doc
-* Add a reference to ps -a
-* Add information about Docker's high level tools over LXC.
-* Fix typo in docs for docker run -dns
-* Fix a typo in the ubuntu installation guide
-* Fix to docs regarding adding docker groups
-* Update default -H docs
-* Update readme with dependencies for building
-* Update amazon.rst to explain that Vagrant is not necessary for running Docker on EC2
-* PostgreSQL service example in documentation
-* Suggest installing linux-headers by default.
-* Change the twitter handle
-* Clarify Amazon EC2 installation
-* 'Base' image is deprecated and should no longer be referenced in the docs.
-* Move note about officially supported kernel
-- Solved the logo being squished in Safari
-
-#### Builder
-
-+ Add USER instruction to Dockerfile
-+ Add workdir support for the Buildfile
-* Add no cache for docker build
-- Fix docker build and docker events output
-- Only count known instructions as build steps
-- Make sure the ENV instruction within a build performs a commit each time
-- Forbid certain paths within docker build ADD
-- Repository name (and optionally a tag) in build usage
-- Make sure ADD will create everything in 0755
-
-#### Remote API
-
-* Sort Images by most recent creation date.
-* Rework opaque requests in registry module
-* Add image name in /events
-* Use mime pkg to parse Content-Type
-* 650 http utils and user agent field
-
-#### Hack
-
-+ Bash Completion: Limit commands to containers of a relevant state
-* Add docker dependencies coverage testing into docker-ci
-
-#### Packaging
-
-+ Docker-brew 0.5.2 support and memory footprint reduction
-* Add new docker dependencies into docker-ci
-- Revert "docker.upstart: avoid spawning a `sh` process"
-+ Docker-brew and Docker standard library
-+ Release docker with docker
-* Fix the upstart script generated by get.docker.io
-* Enable the docs to generate manpages.
-* Revert Bind daemon to 0.0.0.0 in Vagrant.
-
-#### Registry
-
-* Improve auth push
-* Registry unit tests + mock registry
-
-#### Tests
-
-* Improve TestKillDifferentUser to prevent timeout on buildbot
-- Fix typo in TestBindMounts (runContainer called without image)
-* Improve TestGetContainersTop so it does not rely on sleep
-* Relax the lo interface test to allow iface index != 1
-* Add registry functional test to docker-ci
-* Add some tests in server and utils
-
-#### Other
-
-* Contrib: bash completion script
-* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host
-* Don't read from stdout when only attached to stdin
-
-## 0.5.3 (2013-08-13)
-
-#### Runtime
-
-* Use docker group for socket permissions
-- Spawn shell within upstart script
-- Handle ip route showing mask-less IP addresses
-- Add hostname to environment
-
-#### Builder
-
-- Make sure the ENV instruction within a build performs a commit each time
-
-## 0.5.2 (2013-08-08)
-
-* Builder: Forbid certain paths within docker build ADD
-- Runtime: Change network range to avoid conflict with EC2 DNS
-* API: Change daemon to listen on unix socket by default
-
-## 0.5.1 (2013-07-30)
-
-#### Runtime
-
-+ Add `ps` args to `docker top`
-+ Add support for container ID files (pidfile-like)
-+ Add container=lxc in default env
-+ Support networkless containers with `docker run -n` and `docker -d -b=none`
-* Stdout/stderr logs are now stored in the same file as JSON
-* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
-* Change .dockercfg format to json and support multiple auth remotes
-- Do not override volumes from config
-- Fix issue with EXPOSE override
-
-#### API
-
-+ Docker client now sets useragent (RFC 2616)
-+ Add /events endpoint
-
-#### Builder
-
-+ ADD command now understands URLs
-+ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
-- Create directories with 755 instead of 700 within ADD instruction
-
-#### Hack
-
-* Simplify unit tests with helpers
-* Improve docker.upstart event
-* Add coverage testing into docker-ci
-
-## 0.5.0 (2013-07-17)
-
-#### Runtime
-
-+ List all processes running inside a container with 'docker top'
-+ Host directories can be mounted as volumes with 'docker run -v' (see the example after this list)
-+ Containers can expose public UDP ports (eg, '-p 123/udp')
-+ Optionally specify an exact public port (eg. '-p 80:4500')
-* 'docker login' supports additional options
-- Don't save a container's hostname when committing an image.
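For illustration, hedged examples of the 0.5.0-era volume and port options, written with the current flag spelling (the paths and image name are placeholders):

```sh
# Mount a host directory into the container as a volume.
docker run -v /srv/app/logs:/var/log/app busybox ls /var/log/app

# Expose a public UDP port, and pin an exact public-to-private mapping.
docker run -p 123/udp -p 80:4500 my-image
```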
-- Tests: Fix issues in the test suite - -## 0.4.7 (2013-06-28) - -#### Remote API - -* The progress bar updates faster when downloading and uploading large files -- Fix a bug in the optional unix socket transport - -#### Runtime - -* Improve detection of kernel version -+ Host directories can be mounted as volumes with 'docker run -b' -- Fix an issue when only attaching to stdin -* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts - -#### Hack - -* Improve test suite and dev environment -* Remove unit tests' dependency on 'os/user' - -#### Other - -* Registry: easier push/pull to a custom registry -+ Documentation: add terminology section - -## 0.4.6 (2013-06-22) - -- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. - -## 0.4.5 (2013-06-21) - -+ Builder: 'docker build git://URL' fetches and builds a remote git repository -* Runtime: 'docker ps -s' optionally prints container size -* Tests: improved and simplified -- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. -- Builder: fix a regression when using ADD with a single regular file. - -## 0.4.4 (2013-06-19) - -- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. - -## 0.4.3 (2013-06-19) - -#### Builder - -+ ADD of a local file will detect tar archives and unpack them -* ADD improvements: use tar for copy + automatically unpack local archives -* ADD uses tar/untar for copies instead of calling 'cp -ar' -* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. -- Fix a bug which caused builds to fail if ADD was the first command -* Nicer output for 'docker build' - -#### Runtime - -* Remove bsdtar dependency -* Add unix socket and multiple -H support -* Prevent rm of running containers -* Use go1.1 cookiejar -- Fix issue detaching from running TTY container -- Forbid parallel push/pull for a single image/repo. Fixes #311 -- Fix race condition within Run command when attaching. - -#### Client - -* HumanReadable ProgressBar sizes in pull -* Fix docker version's git commit output - -#### API - -* Send all tags on History API call -* Add tag lookup to history command. 
Fixes #882 - -#### Documentation - -- Fix missing command in irc bouncer example - -## 0.4.2 (2013-06-17) - -- Packaging: Bumped version to work around an Ubuntu bug - -## 0.4.1 (2013-06-17) - -#### Remote API - -+ Add flag to enable cross domain requests -+ Add images and containers sizes in docker ps and docker images - -#### Runtime - -+ Configure dns configuration host-wide with 'docker -d -dns' -+ Detect faulty DNS configuration and replace it with a public default -+ Allow docker run : -+ You can now specify public port (ex: -p 80:4500) -* Improve image removal to garbage-collect unreferenced parents - -#### Client - -* Allow multiple params in inspect -* Print the container id before the hijack in `docker run` - -#### Registry - -* Add regexp check on repo's name -* Move auth to the client -- Remove login check on pull - -#### Other - -* Vagrantfile: Add the rest api port to vagrantfile's port_forward -* Upgrade to Go 1.1 -- Builder: don't ignore last line in Dockerfile when it doesn't end with \n - -## 0.4.0 (2013-06-03) - -#### Builder - -+ Introducing Builder -+ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile - -#### Remote API - -+ Introducing Remote API -+ control Docker programmatically using a simple HTTP/json API - -#### Runtime - -* Various reliability and usability improvements - -## 0.3.4 (2013-05-30) - -#### Builder - -+ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile -+ 'docker build -t FOO' applies the tag FOO to the newly built container. - -#### Runtime - -+ Interactive TTYs correctly handle window resize -* Fix how configuration is merged between layers - -#### Remote API - -+ Split stdout and stderr on 'docker run' -+ Optionally listen on a different IP and port (use at your own risk) - -#### Documentation - -* Improve install instructions. - -## 0.3.3 (2013-05-23) - -- Registry: Fix push regression -- Various bugfixes - -## 0.3.2 (2013-05-09) - -#### Registry - -* Improve the checksum process -* Use the size to have a good progress bar while pushing -* Use the actual archive if it exists in order to speed up the push -- Fix error 400 on push - -#### Runtime - -* Store the actual archive on commit - -## 0.3.1 (2013-05-08) - -#### Builder - -+ Implement the autorun capability within docker builder -+ Add caching to docker builder -+ Add support for docker builder with native API as top level command -+ Implement ENV within docker builder -- Check the command's existence prior to create, and add unit tests for the case -* Use any whitespace instead of tabs - -#### Runtime - -+ Add go version to debug infos -* Kernel version - don't show the dash if flavor is empty - -#### Registry - -+ Add docker search top level command in order to search a repository -- Fix pull for official images with specific tag -- Fix issue when logging in with a different user and trying to push -* Improve checksum - async calculation - -#### Images - -+ Output graph of images to dot (graphviz) -- Fix ByParent function - -#### Documentation - -+ New introduction and high-level overview -+ Add the documentation for docker builder -- CSS fix for docker documentation to make REST API docs look better. -- Fix CouchDB example page header mistake -- Fix README formatting -* Update www.docker.io website. 
- -#### Other - -+ Website: new high-level overview -- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc -* Packaging: packaging ubuntu; issue #510: Use golang-stable PPA package to build docker - -## 0.3.0 (2013-05-06) - -#### Runtime - -- Fix the command existence check -- strings.Split may return an empty string on no match -- Fix an index out of range crash if cgroup memory is not - -#### Documentation - -* Various improvements -+ New example: sharing data between 2 couchdb databases - -#### Other - -* Vagrant: Use only one deb line in /etc/apt -+ Registry: Implement the new registry - -## 0.2.2 (2013-05-03) - -+ Support for data volumes ('docker run -v=PATH') -+ Share data volumes between containers ('docker run -volumes-from') -+ Improve documentation -* Upgrade to Go 1.0.3 -* Various upgrades to the dev environment for contributors - -## 0.2.1 (2013-05-01) - -+ 'docker commit -run' bundles a layer with default runtime options: command, ports etc. -* Improve install process on Vagrant -+ New Dockerfile operation: "maintainer" -+ New Dockerfile operation: "expose" -+ New Dockerfile operation: "cmd" -+ Contrib script to build a Debian base layer -+ 'docker -d -r': restart crashed containers at daemon startup -* Runtime: improve test coverage - -## 0.2.0 (2013-04-23) - -- Runtime: ghost containers can be killed and waited for -* Documentation: update install instructions -- Packaging: fix Vagrantfile -- Development: automate releasing binaries and ubuntu packages -+ Add a changelog -- Various bugfixes - -## 0.1.8 (2013-04-22) - -- Dynamically detect cgroup capabilities -- Issue stability warning on kernels <3.8 -- 'docker push' buffers on disk instead of memory -- Fix 'docker diff' for removed files -- Fix 'docker stop' for ghost containers -- Fix handling of pidfile -- Various bugfixes and stability improvements - -## 0.1.7 (2013-04-18) - -- Container ports are available on localhost -- 'docker ps' shows allocated TCP ports -- Contributors can run 'make hack' to start a continuous integration VM -- Streamline ubuntu packaging & uploading -- Various bugfixes and stability improvements - -## 0.1.6 (2013-04-17) - -- Record the author of an image with 'docker commit -author' - -## 0.1.5 (2013-04-17) - -- Disable standalone mode -- Use a custom DNS resolver with 'docker -d -dns' -- Detect ghost containers -- Improve diagnosis of missing system capabilities -- Allow disabling memory limits at compile time -- Add debian packaging -- Documentation: installing on Arch Linux -- Documentation: running Redis on docker -- Fix lxc 0.9 compatibility -- Automatically load aufs module -- Various bugfixes and stability improvements - -## 0.1.4 (2013-04-09) - -- Full support for TTY emulation -- Detach from a TTY session with the escape sequence `C-p C-q` -- Various bugfixes and stability improvements -- Minor UI improvements -- Automatically create our own bridge interface 'docker0' - -## 0.1.3 (2013-04-04) - -- Choose TCP frontend port with '-p :PORT' -- Layer format is versioned -- Major reliability improvements to the process manager -- Various bugfixes and stability improvements - -## 0.1.2 (2013-04-03) - -- Set container hostname with 'docker run -h' -- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' -- Various bugfixes and stability improvements -- UI polish -- Progress bar on push/pull -- Use XZ compression by default -- Make IP allocator lazy - -## 0.1.1 (2013-03-31) - -- Display shorthand IDs for convenience -- Stabilize process management -- Layers 
can include a commit message -- Simplified 'docker attach' -- Fix support for re-attaching -- Various bugfixes and stability improvements -- Auto-download at run -- Auto-login on push -- Beefed up documentation - -## 0.1.0 (2013-03-23) - -Initial public release - -- Implement registry in order to push/pull images -- TCP port allocation -- Fix termcaps on Linux -- Add documentation -- Add Vagrant support with Vagrantfile -- Add unit tests -- Add repository/tags to ease image management -- Improve the layer implementation diff --git a/vendor/github.com/docker/docker/CONTRIBUTING.md b/vendor/github.com/docker/docker/CONTRIBUTING.md deleted file mode 100644 index 6b875d69..00000000 --- a/vendor/github.com/docker/docker/CONTRIBUTING.md +++ /dev/null @@ -1,436 +0,0 @@ -# Contributing to Docker - -Want to hack on Docker? Awesome! We have a contributor's guide that explains -[setting up a Docker development environment and the contribution -process](https://docs.docker.com/opensource/project/who-written-for/). - -![Contributors guide](docs/static_files/contributors.png) - -This page contains information about reporting issues as well as some tips and -guidelines useful to experienced open source contributors. Finally, make sure -you read our [community guidelines](#docker-community-guidelines) before you -start participating. - -## Topics - -* [Reporting Security Issues](#reporting-security-issues) -* [Design and Cleanup Proposals](#design-and-cleanup-proposals) -* [Reporting Issues](#reporting-other-issues) -* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) -* [Community Guidelines](#docker-community-guidelines) - -## Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue; instead, send your report privately to -[security@docker.com](mailto:security@docker.com). - -Security reports are greatly appreciated and we will publicly thank you for them. -We also like to send gifts—if you're into Docker schwag, make sure to let -us know. We currently do not offer a paid security bounty program, but are not -ruling it out in the future. - - -## Reporting other issues - -A great way to contribute to the project is to send a detailed report when you -encounter an issue. We always appreciate a well-written, thorough bug report, -and will thank you for it! - -Check that [our issue database](https://github.com/docker/docker/issues) -doesn't already include that problem or suggestion before submitting an issue. -If you find a match, you can use the "subscribe" button to get notified of -updates. Do *not* leave random "+1" or "I have this too" comments, as they -only clutter the discussion and don't help resolve it. However, if you -have ways to reproduce the issue or have additional information that may help -resolve the issue, please leave a comment. - -When reporting issues, always include: - -* The output of `docker version`. -* The output of `docker info`. - -Also include the steps required to reproduce the problem if possible and -applicable. This information will help us review and fix your issue faster. -When sending lengthy log files, consider posting them as a gist (https://gist.github.com). -Don't forget to remove sensitive data from your log files before posting (you can -replace those parts with "REDACTED"). 
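As a convenience, here is a minimal sketch of collecting the diagnostics requested above before filing a report; the output file name and the redaction pattern are illustrative, so adjust them to whatever is sensitive in your environment:

```sh
# Gather the details every Docker issue report should include.
{
  echo "== docker version =="; docker version
  echo "== docker info ==";    docker info
  echo "== uname -a ==";       uname -a
} > docker-report.txt 2>&1

# Illustrative redaction pass: mask anything that looks like a credential.
sed -i 's/password=[^[:space:]]*/password=REDACTED/g' docker-report.txt
```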
- -**Issue Report Template**: - -``` -Description of problem: - - -`docker version`: - - -`docker info`: - - -`uname -a`: - - -Environment details (AWS, VirtualBox, physical, etc.): - - -How reproducible: - - -Steps to Reproduce: -1. -2. -3. - - -Actual Results: - - -Expected Results: - - -Additional info: - - - -``` - - -## Quick contribution tips and guidelines - -This section gives the experienced contributor some tips and guidelines. - -### Pull requests are always welcome - -Not sure if that typo is worth a pull request? Found a bug and know how to fix -it? Do it! We will appreciate it. Any significant improvement should be -documented as [a GitHub issue](https://github.com/docker/docker/issues) before -anybody starts working on it. - -We are always thrilled to receive pull requests. We do our best to process them -quickly. If your pull request is not accepted on the first try, -don't get discouraged! Our contributor's guide explains [the review process we -use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). - -### Design and cleanup proposals - -You can propose new designs for existing Docker features. You can also design -entirely new features. We really appreciate contributors who want to refactor or -otherwise clean up our project. For information on making these types of -contributions, see [the advanced contribution -section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in -the contributors guide. - -We try hard to keep Docker lean and focused. Docker can't do everything for -everybody. This means that we might decide against incorporating a new feature. -However, there might be a way to implement that feature *on top of* Docker. - -### Talking to other Docker users and contributors
- Internet Relay Chat (IRC): IRC is a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev channels on irc.freenode.net. IRC is a rich chat protocol, but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an easy way to get started.
- Google Groups: There are two groups. Docker-user is for people using Docker containers; the docker-dev group is for contributors and other people contributing to the Docker project. You can join them without a Google account by sending an email to e.g. docker-user+subscribe@googlegroups.com. After receiving the join-request message, simply reply to it to confirm the subscription.
- Twitter: You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories.
- Stack Overflow: Stack Overflow has over 17000 Docker questions listed. We regularly monitor Docker questions, and so do many other knowledgeable Docker users.
### Conventions - -Fork the repository and make changes on your fork in a feature branch: - -- If it's a bug fix branch, name it XXXX-something where XXXX is the number of the issue. -- If it's a feature branch, create an enhancement issue to announce your intentions, and name it XXXX-something where XXXX is the number of the issue. - -Submit unit tests for your changes. Go has a great test framework built in; use it! Take a look at existing tests for inspiration. [Run the full test suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before submitting a pull request. - -Update the documentation when creating or modifying features. Test your documentation changes for clarity, concision, and correctness, as well as a clean documentation build. See our contributors guide for [our style guide](https://docs.docker.com/opensource/doc-style) and instructions on [building the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). - -Write clean code. Universally formatted code promotes ease of writing, reading, and maintenance. Always run `gofmt -s -w file.go` on each changed file before committing your changes. Most editors have plug-ins that do this automatically. - -Pull request descriptions should be as clear as possible and include a reference to all the issues that they address. - -Commit messages must start with a capitalized and short summary (max. 50 chars) written in the imperative, followed by an optional, more detailed explanatory text which is separated from the summary by an empty line. - -Code review comments may be added to your pull request. Discuss, then make the suggested modifications and push additional commits to your feature branch. Post a comment after pushing. New commits show up in the pull request automatically, but the reviewers are notified only when you comment. - -Pull requests must be cleanly rebased on top of master without multiple branches mixed into the PR. - -**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your feature branch to update your pull request rather than `merge master`. - -Before you make a pull request, squash your commits into logical units of work using `git rebase -i` and `git push -f`. A logical unit of work is a consistent set of patches that should be reviewed together: for example, upgrading the version of a vendored dependency and taking advantage of its now available new feature constitute two separate units of work. Implementing a new function and calling it in another file constitute a single logical unit of work. The vast majority of submissions should have a single commit, so if in doubt: squash down to one (a sketch of this flow follows this section). - -After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/). Include documentation changes in the same pull request so that a revert would remove all traces of the feature or fix. - -Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that close an issue. Including references automatically closes the issue on a merge. - -Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly from the Git history. - -Please see the [Coding Style](#coding-style) for further guidelines. 
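To make that concrete, here is a minimal sketch of the rebase-squash-update cycle described above; the branch name `1234-fix-attach` is a hypothetical example:

```sh
# Keep the feature branch current with master (rebase, never merge).
git checkout 1234-fix-attach
git fetch origin
git rebase origin/master

# Squash work-in-progress commits into one logical unit before review.
git rebase -i origin/master

# A rewritten branch needs a force push to update the open pull request.
git push -f origin 1234-fix-attach
```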
### Merge approval - -Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to indicate acceptance. - -A change requires LGTMs from an absolute majority of the maintainers of each component affected. For example, if a change affects `docs/` and `registry/`, it needs an absolute majority from the maintainers of `docs/` AND, separately, an absolute majority of the maintainers of `registry/`. - -For more details, see the [MAINTAINERS](MAINTAINERS) page. - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith <joe.smith@email.com> - -Use your real name (sorry, no pseudonyms or anonymous contributions). - -If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`. - -Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still accepted, so there is no need to update outstanding pull requests to the new format right away, but please do adjust your processes for future contributions. 
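To illustrate (the name and email here are placeholders, not a real identity), the sign-off flow looks like this:

```sh
# One-time setup: the identity used for the Signed-off-by line.
git config user.name "Joe Smith"
git config user.email "joe.smith@email.com"

# -s appends "Signed-off-by: Joe Smith <joe.smith@email.com>" automatically.
git commit -s -m "Fix attach hang when stdin closes early"

# Forgot the sign-off? Amend the last commit and re-sign it.
git commit --amend -s --no-edit
```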
### How can I become a maintainer? - -The procedures for adding new maintainers are explained in the global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS) file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/) repository. - -Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. You don't have to be a maintainer to make a difference on the project! - -## Docker community guidelines - -We want to keep the Docker community awesome, growing and collaborative. We need your help to keep it that way. To help with this we've come up with some general guidelines for the community as a whole: - -* Be nice: Be courteous, respectful and polite to fellow community members: no regional, racial, gender, or other abuse will be tolerated. We like nice people way better than mean ones! - -* Encourage diversity and participation: Make everyone in our community feel welcome, regardless of their background and the extent of their contributions, and do everything possible to encourage participation in our community. - -* Keep it legal: Basically, don't get us in trouble. Share only content that you own, do not share private or sensitive information, and don't break the law. - -* Stay on topic: Make sure that you are posting to the correct channel and avoid off-topic discussions. Remember when you update an issue or respond to an email you are potentially sending to a large number of people. Please consider this before you update. Also remember that nobody likes spam. - -* Don't send email to the maintainers: There's no need to send email to the maintainers to ask them to investigate an issue or to take a look at a pull request. Instead of sending an email, GitHub mentions should be used to ping maintainers to review a pull request, a proposal or an issue. - -### Guideline violations — 3 strikes method - -The point of this section is not to find opportunities to punish people, but we do need a fair way to deal with people who are making our community suck. - -1. First occurrence: We'll give you a friendly, but public reminder that the behavior is inappropriate according to our guidelines. - -2. Second occurrence: We will send you a private message with a warning that any additional violations will result in removal from the community. - -3. Third occurrence: Depending on the violation, we may need to delete or ban your account. - -**Notes:** - -* Obvious spammers are banned on first occurrence. If we don't do this, we'll have spam all over the place. - -* Violations are forgiven after 6 months of good behavior, and we won't hold a grudge. - -* People who commit minor infractions will get some education rather than being put through the 3 strikes process. - -* The rules apply equally to everyone in the community, no matter how much you've contributed. - -* Extreme violations of a threatening, abusive, destructive or illegal nature will be addressed immediately and are not subject to 3 strikes or forgiveness. - -* Contact abuse@docker.com to report abuse or appeal violations. In the case of appeals, we know that mistakes happen, and we'll work with you to come up with a fair solution if there has been a misunderstanding. - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go community. While some of these standards may seem arbitrary, they somehow seem to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these guidelines. We are not looking for a massive PR that fixes this, since that goes against the spirit of the guidelines. All new contributions should make a best effort to clean up and make the code base better than they left it. Obviously, apply your best judgement. Remember, the goal here is to make the code base easier for humans to navigate and understand. Always keep that in mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective Go](http://golang.org/doc/effective_go.html) and [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. 
Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare expectations, caveats and anything else that may be important. If a type gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. In practice, short methods will have short variable names and globals will have longer names. -7. No underscores in package names. If you need a compound name, step back, and re-examine why you need a compound name. If you still think you need a compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to warrant its own package, it has not been written generally enough to be a part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be required. No, we don't need another unit testing framework. Assertion packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend reading through [Effective Go](http://golang.org/doc/effective_go.html). The [Go Blog](http://blog.golang.org/) is also a great resource. Drinking the kool-aid is a lot easier than going thirsty. 
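As a quick self-check, here is a minimal sketch of running the checks these rules imply before pushing; it assumes golint and the Go toolchain are installed and that you run it from the repository root:

```sh
# Rule 1: simplify and rewrite changed files in place.
gofmt -s -w file.go

# Rule 2: the default golint level over every package.
golint ./...

# Catch suspicious constructs the compiler does not reject.
go vet ./...

# Rule 9: plain `go test`, no outside tooling required.
go test ./...
```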
diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile deleted file mode 100644 index 41509d54..00000000 --- a/vendor/github.com/docker/docker/Dockerfile +++ /dev/null @@ -1,280 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# # Publish a release: -# docker run --privileged \ -# -e AWS_S3_BUCKET=baz \ -# -e AWS_ACCESS_KEY=foo \ -# -e AWS_SECRET_KEY=bar \ -# -e GPG_PASSPHRASE=gloubiboulga \ -# docker hack/release.sh -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM debian:jessie - -# add zfs ppa -RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 \ - || apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 -RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list - -# add llvm repo -RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 6084F3CF814B57C1CF12EFD515CF4D18AF4F7421 \ - || apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys 6084F3CF814B57C1CF12EFD515CF4D18AF4F7421 -RUN echo deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main > /etc/apt/sources.list.d/llvm.list - -# allow replacing httpredir mirror -ARG APT_MIRROR=httpredir.debian.org -RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - apt-utils \ - aufs-tools \ - automake \ - bash-completion \ - binutils-mingw-w64 \ - bsdmainutils \ - btrfs-tools \ - build-essential \ - clang-3.8 \ - createrepo \ - curl \ - dpkg-sig \ - gcc-mingw-w64 \ - git \ - iptables \ - jq \ - libapparmor-dev \ - libcap-dev \ - libltdl-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - libtool \ - mercurial \ - net-tools \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - ubuntu-zfs \ - xfsprogs \ - libzfs-dev \ - tar \ - zip \ - --no-install-recommends \ - && pip install awscli==1.10.15 \ - && ln -snf /usr/bin/clang-3.8 /usr/local/bin/clang \ - && ln -snf /usr/bin/clang++-3.8 /usr/local/bin/clang++ - -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Configure the container for OSX cross compilation -ENV OSX_SDK MacOSX10.11.sdk -ENV OSX_CROSS_COMMIT 8aa9b71a394905e6c5f4b59e2b97b87a004658a4 -RUN set -x \ - && export OSXCROSS_PATH="/osxcross" \ - && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ - && ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \ - && curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ - && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh -ENV PATH /osxcross/target/bin:$PATH - -# install seccomp: the version shipped in trusty is too old -ENV SECCOMP_VERSION 2.3.0 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Install Go -# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines -# will need updating, to avoid errors. Ping #docker-maintainers on IRC -# with a heads-up. 
-ENV GO_VERSION 1.5.4 -RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \ - | tar -xzC /usr/local - -# !!! TEMPORARY HACK !!! -# Because of https://github.com/golang/go/issues/15286 we have to revert to Go 1.5.3 for windows/amd64 in master -# To change which version of Go to compile with, simply prepend PATH with /usr/local/go1.5.3/bin -# and set GOROOT to /usr/local/go1.5.3 -ENV HACK_GO_VERSION 1.5.3 -RUN curl -fsSL "https://storage.googleapis.com/golang/go${HACK_GO_VERSION}.linux-amd64.tar.gz" \ - | tar -xzC /tmp \ - && mv /tmp/go "/usr/local/go${HACK_GO_VERSION}" - -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 \ - freebsd/amd64 freebsd/386 freebsd/arm \ - windows/amd64 windows/386 - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. -# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. 
-ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION docker-v1.11-3 -RUN set -x \ - && export GO15VENDOREXPERIMENT=1 \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. 
-RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \ - busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \ - debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \ - hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT baf6536d6259209c3edfa2b22237af82942d3dfa -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.1 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.aarch64 b/vendor/github.com/docker/docker/Dockerfile.aarch64 deleted file mode 100644 index b2721184..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.aarch64 +++ /dev/null @@ -1,211 +0,0 @@ -# This file describes the standard way to build Docker on aarch64, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.aarch64 . 
-# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM aarch64/ubuntu:wily - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - dpkg-sig \ - g++ \ - gcc \ - git \ - iptables \ - jq \ - libapparmor-dev \ - libc6-dev \ - libcap-dev \ - libsqlite3-dev \ - libsystemd-dev \ - mercurial \ - net-tools \ - parallel \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - gccgo \ - --no-install-recommends - -# Install armhf loader to use armv6 binaries on armv8 -RUN dpkg --add-architecture armhf \ - && apt-get update \ - && apt-get install -y libc6:armhf - -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# fix platform enablement in lvm2 to support aarch64 properly -RUN set -e \ - && for f in config.guess config.sub; do \ - curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ - done -# "arch.c:78:2: error: #error the arch code needs to know about your machine type" - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# install seccomp: the version shipped in trusty is too old -ENV SECCOMP_VERSION 2.3.0 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Install Go -# We don't have official binary tarballs for ARM64, either for Go or bootstrap, -# so we use the official armv6 released binaries as a GOROOT_BOOTSTRAP, and -# build Go from source code. -ENV GO_VERSION 1.5.4 -RUN mkdir /usr/src/go && curl -fsSL https://storage.googleapis.com/golang/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ - && cd /usr/src/go/src \ - && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash - -ENV PATH /usr/src/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# Only install one version of the registry, because the old version, which supports -# schema1 manifests, does not work on ARM64; we skip the integration-cli -# tests for schema1 manifests on ARM64. 
-ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION docker-v1.11-3 -RUN set -x \ - && export GO15VENDOREXPERIMENT=1 \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. 
-RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \ - aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \ - aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \ - aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT baf6536d6259209c3edfa2b22237af82942d3dfa -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.1 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.armhf b/vendor/github.com/docker/docker/Dockerfile.armhf deleted file mode 100644 index f71a78b7..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.armhf +++ /dev/null @@ -1,221 +0,0 @@ -# This file describes the standard way to build Docker on ARMv7, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.armhf . 
-# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM armhf/debian:jessie - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libltdl-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - libtool \ - mercurial \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - xfsprogs \ - tar \ - --no-install-recommends - -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Install Go -# TODO Update to 1.5.4 once available, or build from source, as these builds -# are marked "end of life", see http://dave.cheney.net/unofficial-arm-tarballs -ENV GO_VERSION 1.5.3 -RUN curl -fsSL "http://dave.cheney.net/paste/go${GO_VERSION}.linux-arm.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# we're building for armhf, which is ARMv7, so let's be explicit about that -ENV GOARCH arm -ENV GOARM 7 - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
-# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - -# install seccomp: the version shipped in trusty is too old -ENV SECCOMP_VERSION 2.3.0 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION docker-v1.11-3 -RUN set -x \ - && export GO15VENDOREXPERIMENT=1 \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r 
test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \ - armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \ - armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \ - armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT baf6536d6259209c3edfa2b22237af82942d3dfa -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.1 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . 
/go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.dapper b/vendor/github.com/docker/docker/Dockerfile.dapper new file mode 100644 index 00000000..6a6a18c6 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.dapper @@ -0,0 +1,49 @@ +FROM ubuntu:16.04 + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + curl \ + ca-certificates \ + libc-dev \ + gcc \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libseccomp-dev \ + net-tools \ + iproute2 \ + pkg-config \ + tar \ + vim \ + --no-install-recommends + +# Install go +RUN curl -sLf https://storage.googleapis.com/golang/go1.6.2.linux-amd64.tar.gz | tar xzf - -C /usr/local +ENV GOPATH=/go +ENV PATH=/go/bin:/usr/local/go/bin:$PATH + +# Setup runc +RUN ln -s /go/src/github.com/docker/docker/bin/docker /usr/local/bin/docker-runc + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +# Trash +RUN go get github.com/rancher/trash + +ENV DOCKER_BUILDTAGS apparmor seccomp selinux cgo daemon netgo +ENV DAPPER_SOURCE /go/src/github.com/docker/docker +ENV DAPPER_RUN_ARGS --privileged +ENV DAPPER_OUTPUT bin +ENV DAPPER_ENV TAG REPO +ENV TRASH_CACHE ${DAPPER_SOURCE}/.trash-cache +ENV PATH=${DAPPER_SOURCE}/bin:$PATH + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENTRYPOINT ["./scripts/entry"] diff --git a/vendor/github.com/docker/docker/Dockerfile.gccgo b/vendor/github.com/docker/docker/Dockerfile.gccgo deleted file mode 100644 index 9b40dd40..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.gccgo +++ /dev/null @@ -1,104 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.gccgo . 
-# - -FROM gcc:5.3 - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - btrfs-tools \ - build-essential \ - curl \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libsqlite3-dev \ - mercurial \ - net-tools \ - parallel \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - --no-install-recommends - -# Get lvm2 source for compiling statically -RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# install seccomp: the version shipped in jessie is too old -ENV SECCOMP_VERSION v2.3.0 -RUN set -x \ - && export SECCOMP_PATH=$(mktemp -d) \ - && git clone https://github.com/seccomp/libseccomp.git "$SECCOMP_PATH" \ - && ( \ - cd "$SECCOMP_PATH" \ - && git checkout "$SECCOMP_VERSION" \ - && ./autogen.sh \ - && ./configure --prefix=/usr \ - && make \ - && make install \ - ) \ - && rm -rf "$SECCOMP_PATH" - -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor seccomp selinux - -# Install runc -ENV RUNC_COMMIT baf6536d6259209c3edfa2b22237af82942d3dfa -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.1 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.ppc64le b/vendor/github.com/docker/docker/Dockerfile.ppc64le deleted file mode 100644 index e63f36c8..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.ppc64le +++ /dev/null @@ -1,218 +0,0 @@ -# This file describes the standard way to build Docker on ppc64le, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.ppc64le . 
-# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM ppc64le/gcc:5.3 - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libltdl-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - libtool \ - mercurial \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - xfsprogs \ - tar \ - --no-install-recommends - -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# fix platform enablement in lvm2 to support ppc64le properly -RUN set -e \ - && for f in config.guess config.sub; do \ - curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ - done -# "arch.c:78:2: error: #error the arch code needs to know about your machine type" - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -## BUILD GOLANG 1.6 -# NOTE: ppc64le has compatibility issues with older versions of go, so make sure the version >= 1.6 -ENV GO_VERSION 1.6.2 -ENV GO_DOWNLOAD_URL https://golang.org/dl/go${GO_VERSION}.src.tar.gz -ENV GO_DOWNLOAD_SHA256 787b0b750d037016a30c6ed05a8a70a91b2e9db4bd9b1a2453aa502a63f1bccc -ENV GOROOT_BOOTSTRAP /usr/local - -RUN curl -fsSL "$GO_DOWNLOAD_URL" -o golang.tar.gz \ - && echo "$GO_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \ - && tar -C /usr/src -xzf golang.tar.gz \ - && rm golang.tar.gz \ - && cd /usr/src/go/src && ./make.bash 2>&1 - -ENV GOROOT_BOOTSTRAP /usr/src/ - -ENV PATH /usr/src/go/bin/:/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
-# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION docker-v1.11-3 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# 
Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \ - ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \ - ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \ - ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT baf6536d6259209c3edfa2b22237af82942d3dfa -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.1 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.s390x b/vendor/github.com/docker/docker/Dockerfile.s390x deleted file mode 100644 index b6bda59d..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.s390x +++ /dev/null @@ -1,224 +0,0 @@ -# This file describes the standard way to build Docker on s390x, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.s390x . 
-# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM s390x/gcc:5.3 - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libltdl-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - libtool \ - mercurial \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - xfsprogs \ - tar \ - --no-install-recommends - -# install seccomp: the version shipped in jessie is too old -ENV SECCOMP_VERSION 2.3.0 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# fix platform enablement in lvm2 to support s390x properly -RUN set -e \ - && for f in config.guess config.sub; do \ - curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ - done -# "arch.c:78:2: error: #error the arch code needs to know about your machine type" - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Note: Go comes from the base image (gccgo, specifically) -# We can't compile Go proper because s390x isn't an officially supported architecture yet. - -ENV PATH /go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
-# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -# TODO update this sha when we upgrade to Go 1.5+ -ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION docker-v1.11-3 -RUN set -x \ - && export GO15VENDOREXPERIMENT=1 \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION" && ln -s . 
vendor/src) \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \ - s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \ - s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \ - s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT baf6536d6259209c3edfa2b22237af82942d3dfa -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.1 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd 
"$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.simple b/vendor/github.com/docker/docker/Dockerfile.simple deleted file mode 100644 index e2827fed..00000000 --- a/vendor/github.com/docker/docker/Dockerfile.simple +++ /dev/null @@ -1,85 +0,0 @@ -# docker build -t docker:simple -f Dockerfile.simple . -# docker run --rm docker:simple hack/make.sh dynbinary -# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit -# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli - -# This represents the bare minimum required to build and test Docker. - -FROM debian:jessie - -# compile and runtime deps -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - btrfs-tools \ - build-essential \ - curl \ - gcc \ - git \ - libapparmor-dev \ - libdevmapper-dev \ - libsqlite3-dev \ - \ - ca-certificates \ - e2fsprogs \ - iptables \ - procps \ - xfsprogs \ - xz-utils \ - \ - aufs-tools \ - && rm -rf /var/lib/apt/lists/* - -# install seccomp: the version shipped in trusty is too old -ENV SECCOMP_VERSION 2.3.0 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Install Go -# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines -# will need updating, to avoid errors. Ping #docker-maintainers on IRC -# with a heads-up. 
-ENV GO_VERSION 1.5.3
-RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \
-    | tar -xzC /usr/local
-ENV PATH /go/bin:/usr/local/go/bin:$PATH
-ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
-ENV CGO_LDFLAGS -L/lib
-
-# Install runc
-ENV RUNC_COMMIT baf6536d6259209c3edfa2b22237af82942d3dfa
-RUN set -x \
-    && export GOPATH="$(mktemp -d)" \
-    && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
-    && cd "$GOPATH/src/github.com/opencontainers/runc" \
-    && git checkout -q "$RUNC_COMMIT" \
-    && make static BUILDTAGS="seccomp apparmor selinux" \
-    && cp runc /usr/local/bin/docker-runc \
-    && rm -rf "$GOPATH"
-
-# Install containerd
-ENV CONTAINERD_COMMIT v0.2.1
-RUN set -x \
-    && export GOPATH="$(mktemp -d)" \
-    && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
-    && cd "$GOPATH/src/github.com/docker/containerd" \
-    && git checkout -q "$CONTAINERD_COMMIT" \
-    && make static \
-    && cp bin/containerd /usr/local/bin/docker-containerd \
-    && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \
-    && cp bin/ctr /usr/local/bin/docker-containerd-ctr \
-    && rm -rf "$GOPATH"
-
-ENV AUTO_GOPATH 1
-WORKDIR /usr/src/docker
-COPY . /usr/src/docker
diff --git a/vendor/github.com/docker/docker/Dockerfile.windows b/vendor/github.com/docker/docker/Dockerfile.windows
deleted file mode 100755
index e6712754..00000000
--- a/vendor/github.com/docker/docker/Dockerfile.windows
+++ /dev/null
@@ -1,89 +0,0 @@
-# This file describes the standard way to build Docker, using a docker container on Windows
-# Server 2016
-#
-# Usage:
-#
-# # Assemble the full dev environment. This is slow the first time. Run this from
-# # a directory containing the sources you are validating. For example from
-# # c:\go\src\github.com\docker\docker
-#
-# docker build -t docker -f Dockerfile.windows .
-#
-#
-# # Build docker in a container. Run the following from a Windows cmd command prompt,
-# # replacing c:\built with the directory you want the binaries to be placed on the
-# # host system.
-#
-# docker run --rm -v "c:\built:c:\target" docker sh -c 'cd /c/go/src/github.com/docker/docker; hack/make.sh binary; ec=$?; if [ $ec -eq 0 ]; then robocopy /c/go/src/github.com/docker/docker/bundles/$(cat VERSION)/binary /c/target/binary; fi; exit $ec'
-#
-# Important notes:
-# ---------------
-#
-# The posix utilities from GIT aren't usable interactively as at January 2016. This
-# is because they require a console window which isn't present in a container in Windows.
-# See the example at the top of this file. Do NOT use -it in that docker run!!!
-#
-# Don't try to use a volume for passing the source through. The posix utilities will
-# balk at reparse points. Again, see the example at the top of this file on how to use a volume
-# to get the built binary out of the container.
-#
-# The steps are minimised dramatically to improve performance
-
-FROM windowsservercore

-# Environment variable notes:
-# - GO_VERSION must be consistent with the 'Dockerfile' used by Linux.
-# - FROM_DOCKERFILE is used for detection of building within a container.
-ENV GO_VERSION=1.5.3 \
-    GIT_LOCATION=https://github.com/git-for-windows/git/releases/download/v2.7.2.windows.1/Git-2.7.2-64-bit.exe \
-    GOPATH=C:/go;C:/go/src/github.com/docker/docker/vendor \
-    FROM_DOCKERFILE=1
-
-WORKDIR c:/
-
-# Everything downloaded/installed in one go (better performance, esp on TP4)
-RUN \
-    setx /M Path "c:\git\cmd;c:\git\bin;c:\git\usr\bin;%Path%;c:\gcc\bin;c:\go\bin" && \
-    setx GOROOT "c:\go" && \
-    powershell -command \
-        $ErrorActionPreference = 'Stop'; \
-        Function Download-File([string] $source, [string] $target) { \
-            $wc = New-Object net.webclient; $wc.Downloadfile($source, $target) \
-        } \
-        \
-        Write-Host INFO: Downloading git...; \
-        Download-File %GIT_LOCATION% gitsetup.exe; \
-        \
-        Write-Host INFO: Downloading go...; \
-        Download-File https://storage.googleapis.com/golang/go%GO_VERSION%.windows-amd64.msi go.msi; \
-        \
-        Write-Host INFO: Downloading compiler 1 of 3...; \
-        Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip gcc.zip; \
-        \
-        Write-Host INFO: Downloading compiler 2 of 3...; \
-        Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip runtime.zip; \
-        \
-        Write-Host INFO: Downloading compiler 3 of 3...; \
-        Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip binutils.zip; \
-        \
-        Write-Host INFO: Installing git...; \
-        Start-Process gitsetup.exe -ArgumentList '/VERYSILENT /SUPPRESSMSGBOXES /CLOSEAPPLICATIONS /DIR=c:\git\' -Wait; \
-        \
-        Write-Host INFO: Installing go...; \
-        Start-Process msiexec -ArgumentList '-i go.msi -quiet' -Wait; \
-        \
-        Write-Host INFO: Unzipping compiler...; \
-        c:\git\usr\bin\unzip.exe -q -o gcc.zip -d /c/gcc; \
-        c:\git\usr\bin\unzip.exe -q -o runtime.zip -d /c/gcc; \
-        c:\git\usr\bin\unzip.exe -q -o binutils.zip -d /c/gcc; \
-        \
-        Write-Host INFO: Removing interim files; \
-        Remove-Item *.zip; \
-        Remove-Item go.msi; \
-        Remove-Item gitsetup.exe; \
-        \
-        Write-Host INFO: Completed
-
-# Prepare for building
-COPY . /go/src/github.com/docker/docker
-
diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS
deleted file mode 100644
index 3f494e5d..00000000
--- a/vendor/github.com/docker/docker/MAINTAINERS
+++ /dev/null
@@ -1,273 +0,0 @@
-# Docker maintainers file
-#
-# This file describes who runs the docker/docker project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant
-# parser.
-#
-# This file is compiled into the MAINTAINERS file in docker/opensource.
-#
-[Org]
-
-    [Org."Core maintainers"]
-
-    # The Core maintainers are the ghostbusters of the project: when there's a problem others
-    # can't solve, they show up and fix it with bizarre devices and weaponry.
-    # They have final say on technical implementation and coding style.
-    # They are ultimately responsible for quality in all its forms: usability polish,
-    # bugfixes, performance, stability, etc. When ownership can cleanly be passed to
-    # a subsystem, they are responsible for doing so and holding the
-    # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners.
-
-    # For each release (including minor releases), a "release captain" is assigned from the
-    # pool of core maintainers.
Rotation is encouraged across all maintainers, to ensure - # the release process is clear and up-to-date. - - people = [ - "aaronlehmann", - "calavera", - "coolljt0725", - "cpuguy83", - "crosbymichael", - "duglin", - "estesp", - "icecrime", - "jhowardmsft", - "lk4d4", - "mavenugo", - "mhbauer", - "runcom", - "tianon", - "tibor", - "tonistiigi", - "unclejack", - "vdemeester" - ] - - [Org."Docs maintainers"] - - # TODO Describe the docs maintainers role. - - people = [ - "jamtur01", - "moxiegirl", - "sven", - "thajeztah" - ] - - [Org.Curators] - - # The curators help ensure that incoming issues and pull requests are properly triaged and - # that our various contribution and reviewing processes are respected. With their knowledge of - # the repository activity, they can also guide contributors to relevant material or - # discussions. - # - # They are neither code nor docs reviewers, so they are never expected to merge. They can - # however: - # - close an issue or pull request when it's an exact duplicate - # - close an issue or pull request when it's inappropriate or off-topic - - people = [ - "programmerq", - "thajeztah" - ] - - [Org.Alumni] - - # This list contains maintainers that are no longer active on the project. - # It is thanks to these people that the project has become what it is today. - # Thank you! - - people = [ - # As a maintainer, Erik was responsible for the "builder", and - # started the first designs for the new networking model in - # Docker. Erik is now working on all kinds of plugins for Docker - # (https://github.com/contiv) and various open source projects - # in his own repository https://github.com/erikh. You may - # still stumble into him in our issue tracker, or on IRC. - "erikh", - - # Vincent "vbatts!" Batts made his first contribution to the project - # in November 2013, to become a maintainer a few months later, on - # May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778). - # As a maintainer, Vincent made important contributions to core elements - # of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper). - # He also contributed the "tar-split" library, an important element - # for the content-addressable store. - # Vincent is currently a member of the Open Containers Initiative - # Technical Oversight Board (TOB), besides his work at Red Hat and - # Project Atomic. You can still find him regularly hanging out in - # our repository and the #docker-dev and #docker-maintainers IRC channels - # for a chat, as he's always a lot of fun. - "vbatts", - - # Victor is one of the earliest contributors to Docker, having worked on the - # project when it was still "dotCloud" in April 2013. He's been responsible - # for multiple releases (https://github.com/docker/docker/pulls?q=is%3Apr+bump+in%3Atitle+author%3Avieux), - # and up until today (2015), our number 2 contributor. Although he's no longer - # a maintainer for the Docker "Engine", he's still actively involved in other - # Docker projects, and most likely can be found in the Docker Swarm repository, - # for which he's a core maintainer. - "vieux", - - # Vishnu became a maintainer to help out on the daemon codebase and - # libcontainer integration. He's currently involved in the - # Open Containers Initiative, working on the specifications, - # besides his work on cAdvisor and Kubernetes for Google. - "vishh" - ] - -[people] - -# A reference list of all people associated with the project. 
-# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" - - [people.coolljt0725] - Name = "Lei Jitang" - Email = "leijitang@huawei.com" - GitHub = "coolljt0725" - - [people.cpuguy83] - Name = "Brian Goff" - Email = "cpuguy83@gmail.com" - Github = "cpuguy83" - - [people.crosbymichael] - Name = "Michael Crosby" - Email = "crosbymichael@gmail.com" - GitHub = "crosbymichael" - - [people.duglin] - Name = "Doug Davis" - Email = "dug@us.ibm.com" - GitHub = "duglin" - - [people.erikh] - Name = "Erik Hollensbe" - Email = "erik@docker.com" - GitHub = "erikh" - - [people.estesp] - Name = "Phil Estes" - Email = "estesp@linux.vnet.ibm.com" - GitHub = "estesp" - - [people.icecrime] - Name = "Arnaud Porterie" - Email = "arnaud@docker.com" - GitHub = "icecrime" - - [people.jamtur01] - Name = "James Turnbull" - Email = "james@lovedthanlost.net" - GitHub = "jamtur01" - - [people.jhowardmsft] - Name = "John Howard" - Email = "jhoward@microsoft.com" - GitHub = "jhowardmsft" - - [people.jfrazelle] - Name = "Jessie Frazelle" - Email = "jess@linux.com" - GitHub = "jfrazelle" - - [people.lk4d4] - Name = "Alexander Morozov" - Email = "lk4d4@docker.com" - GitHub = "lk4d4" - - [people.mavenugo] - Name = "Madhu Venugopal" - Email = "madhu@docker.com" - GitHub = "mavenugo" - - [people.mhbauer] - Name = "Morgan Bauer" - Email = "mbauer@us.ibm.com" - GitHub = "mhbauer" - - [people.moxiegirl] - Name = "Mary Anthony" - Email = "mary.anthony@docker.com" - GitHub = "moxiegirl" - - [people.programmerq] - Name = "Jeff Anderson" - Email = "jeff@docker.com" - GitHub = "programmerq" - - [people.runcom] - Name = "Antonio Murdaca" - Email = "runcom@redhat.com" - GitHub = "runcom" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.sven] - Name = "Sven Dowideit" - Email = "SvenDowideit@home.org.au" - GitHub = "SvenDowideit" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.tianon] - Name = "Tianon Gravi" - Email = "admwiggin@gmail.com" - GitHub = "tianon" - - [people.tibor] - Name = "Tibor Vass" - Email = "tibor@docker.com" - GitHub = "tiborvass" - - [people.tonistiigi] - Name = "Tõnis Tiigi" - Email = "tonis@docker.com" - GitHub = "tonistiigi" - - [people.unclejack] - Name = "Cristian Staretu" - Email = "cristian.staretu@gmail.com" - GitHub = "unclejack" - - [people.vbatts] - Name = "Vincent Batts" - Email = "vbatts@redhat.com" - GitHub = "vbatts" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" - - [people.vieux] - Name = "Victor Vieux" - Email = "vieux@docker.com" - GitHub = "vieux" - - [people.vishh] - Name = "Vishnu Kannan" - Email = "vishnuk@google.com" - GitHub = "vishh" diff --git a/vendor/github.com/docker/docker/Makefile b/vendor/github.com/docker/docker/Makefile index a8f48036..d7d72a16 100644 --- a/vendor/github.com/docker/docker/Makefile +++ b/vendor/github.com/docker/docker/Makefile @@ -1,116 +1,23 @@ -.PHONY: all binary build build-gccgo cross default docs docs-build docs-shell shell gccgo test test-docker-py test-integration-cli test-unit validate help +TARGETS := $(shell ls scripts) -# set the graph driver as the current graphdriver if not 
set -DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) +.dapper: + @echo Downloading dapper + @curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp + @@chmod +x .dapper.tmp + @./.dapper.tmp -v + @mv .dapper.tmp .dapper -# get OS/Arch of docker engine -DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}') -DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') +$(TARGETS): .dapper + ./.dapper $@ -# env vars passed through directly to Docker's build scripts -# to allow things like `make KEEPBUNDLE=1 binary` easily -# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these -DOCKER_ENVS := \ - -e BUILDFLAGS \ - -e KEEPBUNDLE \ - -e DOCKER_BUILD_GOGC \ - -e DOCKER_BUILD_PKGS \ - -e DOCKER_DEBUG \ - -e DOCKER_EXPERIMENTAL \ - -e DOCKER_GITCOMMIT \ - -e DOCKER_GRAPHDRIVER=$(DOCKER_GRAPHDRIVER) \ - -e DOCKER_INCREMENTAL_BINARY \ - -e DOCKER_REMAP_ROOT \ - -e DOCKER_STORAGE_OPTS \ - -e DOCKER_USERLANDPROXY \ - -e TESTDIRS \ - -e TESTFLAGS \ - -e TIMEOUT -# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds +trash: .dapper + ./.dapper -m bind trash -# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` -# (default to no bind mount if DOCKER_HOST is set) -# note: BINDDIR is supported for backwards-compatibility here -BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) -DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") +trash-keep: .dapper + ./.dapper -m bind trash -k -# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. -# The volume will be cleaned up when the container is removed due to `--rm`. -# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set. -DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v "/go/src/github.com/docker/docker/bundles") +deps: trash -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) -DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) - -DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) - -# if this session isn't interactive, then we don't want to allocate a -# TTY, which would fail, but if it is interactive, we do want to attach -# so that the user can send e.g. ^C through. -INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) -ifeq ($(INTERACTIVE), 1) - DOCKER_FLAGS += -t -endif - -DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" - -default: binary - -all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives - $(DOCKER_RUN_DOCKER) hack/make.sh - -binary: build ## build the linux binaries - $(DOCKER_RUN_DOCKER) hack/make.sh binary - -build: bundles - docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . 
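The dapper-based targets above shell out to `./.dapper`, so day-to-day usage reduces to plain `make` invocations. A minimal sketch of that workflow follows; `build` is an assumed script name under the `scripts` directory (only `trash`, `trash-keep`, and `deps` are spelled out in the Makefile itself), while `trash` and the default `ci` goal come straight from the Makefile:

```bash
# First invocation downloads the dapper binary next to the Makefile,
# then runs the named script inside the Dockerfile.dapper build container.
make build      # roughly equivalent to: ./.dapper build

# Re-vendor dependencies from trash.conf inside the build container.
make trash      # roughly equivalent to: ./.dapper -m bind trash

# With no target, make falls through to the default `ci` goal
# declared at the end of the Makefile.
make
```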
- -build-gccgo: bundles - docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)-gccgo" -f Dockerfile.gccgo . - -bundles: - mkdir bundles - -cross: build ## cross build the binaries for darwin, freebsd and\nwindows - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross - -win: build ## cross build the binary for windows - $(DOCKER_RUN_DOCKER) hack/make.sh win - -tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz - -deb: build ## build the deb packages - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb - -docs: ## build the docs - $(MAKE) -C docs docs - -gccgo: build-gccgo ## build the gcc-go linux binaries - $(DOCKER_FLAGS) "$(DOCKER_IMAGE)-gccgo" hack/make.sh gccgo - -rpm: build ## build the rpm packages - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm - -shell: build ## start a shell inside the build env - $(DOCKER_RUN_DOCKER) bash - -test: build ## run the unit, integration and docker-py tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py - -test-docker-py: build ## run the docker-py tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py - -test-integration-cli: build ## run the integration tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-cli - -test-unit: build ## run the unit tests - $(DOCKER_RUN_DOCKER) hack/make.sh test-unit - -validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor - $(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-default-seccomp validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet validate-vendor - -help: ## this help - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) +.DEFAULT_GOAL := ci +.PHONY: $(TARGETS) diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md deleted file mode 100644 index 26151c89..00000000 --- a/vendor/github.com/docker/docker/README.md +++ /dev/null @@ -1,304 +0,0 @@ -Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) -============================ - -Docker is an open source project to pack, ship and run any application -as a lightweight container. - -Docker containers are both *hardware-agnostic* and *platform-agnostic*. -This means they can run anywhere, from your laptop to the largest -cloud compute instance and everything in between - and they don't require -you to use a particular language, framework or packaging system. That -makes them great building blocks for deploying and scaling web apps, -databases, and backend services without depending on a particular stack -or provider. - -Docker began as an open-source implementation of the deployment engine which -powers [dotCloud](https://www.dotcloud.com), a popular Platform-as-a-Service. -It benefits directly from the experience accumulated over several years -of large-scale operation and support of hundreds of thousands of -applications and databases. - -![](docs/static_files/docker-logo-compressed.png "Docker") - -## Security Disclosure - -Security is very important to us. 
If you have any issue regarding security, -please disclose the information responsibly by sending an email to -security@docker.com and not by creating a github issue. - -## Better than VMs - -A common method for distributing applications and sandboxing their -execution is to use virtual machines, or VMs. Typical VM formats are -VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory -these formats should allow every developer to automatically package -their application into a "machine" for easy distribution and deployment. -In practice, that almost never happens, for a few reasons: - - * *Size*: VMs are very large which makes them impractical to store - and transfer. - * *Performance*: running VMs consumes significant CPU and memory, - which makes them impractical in many scenarios, for example local - development of multi-tier applications, and large-scale deployment - of cpu and memory-intensive applications on large numbers of - machines. - * *Portability*: competing VM environments don't play well with each - other. Although conversion tools do exist, they are limited and - add even more overhead. - * *Hardware-centric*: VMs were designed with machine operators in - mind, not software developers. As a result, they offer very - limited tooling for what developers need most: building, testing - and running their software. For example, VMs offer no facilities - for application versioning, monitoring, configuration, logging or - service discovery. - -By contrast, Docker relies on a different sandboxing method known as -*containerization*. Unlike traditional virtualization, containerization -takes place at the kernel level. Most modern operating system kernels -now support the primitives necessary for containerization, including -Linux with [openvz](https://openvz.org), -[vserver](http://linux-vserver.org) and more recently -[lxc](https://linuxcontainers.org/), Solaris with -[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), -and FreeBSD with -[Jails](https://www.freebsd.org/doc/handbook/jails.html). - -Docker builds on top of these low-level primitives to offer developers a -portable format and runtime environment that solves all four problems. -Docker containers are small (and their transfer can be optimized with -layers), they have basically zero memory and cpu overhead, they are -completely portable, and are designed from the ground up with an -application-centric design. - -Perhaps best of all, because Docker operates at the OS level, it can still be -run inside a VM! - -## Plays well with others - -Docker does not require you to buy into a particular programming -language, framework, packaging system, or configuration language. - -Is your application a Unix process? Does it use files, tcp connections, -environment variables, standard Unix streams and command-line arguments -as inputs and outputs? Then Docker can run it. - -Can your application's build be expressed as a sequence of such -commands? Then Docker can build it. - -## Escape dependency hell - -A common problem for developers is the difficulty of managing all -their application's dependencies in a simple and automated way. - -This is usually difficult for several reasons: - - * *Cross-platform dependencies*. Modern applications often depend on - a combination of system libraries and binaries, language-specific - packages, framework-specific modules, internal components - developed for another project, etc. 
These dependencies live in - different "worlds" and require different tools - these tools - typically don't work well with each other, requiring awkward - custom integrations. - - * *Conflicting dependencies*. Different applications may depend on - different versions of the same dependency. Packaging tools handle - these situations with various degrees of ease - but they all - handle them in different and incompatible ways, which again forces - the developer to do extra work. - - * *Custom dependencies*. A developer may need to prepare a custom - version of their application's dependency. Some packaging systems - can handle custom versions of a dependency, others can't - and all - of them handle it differently. - - -Docker solves the problem of dependency hell by giving the developer a simple -way to express *all* their application's dependencies in one place, while -streamlining the process of assembling them. If this makes you think of -[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't -*replace* your favorite packaging systems. It simply orchestrates -their use in a simple and repeatable way. How does it do that? With -layers. - -Docker defines a build as running a sequence of Unix commands, one -after the other, in the same container. Build commands modify the -contents of the container (usually by installing new files on the -filesystem), the next command modifies it some more, etc. Since each -build command inherits the result of the previous commands, the -*order* in which the commands are executed expresses *dependencies*. - -Here's a typical Docker build process: - -```bash -FROM ubuntu:12.04 -RUN apt-get update && apt-get install -y python python-pip curl -RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv -RUN cd helloflask-master && pip install -r requirements.txt -``` - -Note that Docker doesn't care *how* dependencies are built - as long -as they can be built by running a Unix command in a container. - - -Getting started -=============== - -Docker can be installed either on your computer for building applications or -on servers for running them. To get started, [check out the installation -instructions in the -documentation](https://docs.docker.com/engine/installation/). - -Usage examples -============== - -Docker can be used to run short-lived commands, long-running daemons -(app servers, databases, etc.), interactive shell sessions, etc. - -You can find a [list of real-world -examples](https://docs.docker.com/engine/examples/) in the -documentation. 
-
-Under the hood
---------------
-
-Under the hood, Docker is built on the following components:
-
-* The
-  [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt)
-  and
-  [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html)
-  capabilities of the Linux kernel
-* The [Go](https://golang.org) programming language
-* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md)
-* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md)
-
-Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker)
-======================
-
-| **Master** (Linux) | **Experimental** (linux) | **Windows** | **FreeBSD** |
-|------------------|----------------------|---------|---------|
-| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) |
-
-Want to hack on Docker? Awesome! We have [instructions to help you get
-started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/).
-
-These instructions are probably not perfect, please let us know if anything
-feels wrong or incomplete. Better yet, submit a PR and improve them yourself.
-
-Getting the development builds
-==============================
-
-Want to run Docker from a master build? You can download
-master builds at [master.dockerproject.org](https://master.dockerproject.org).
-They are updated with each commit merged into the master branch.
-
-Don't know how to use that super cool new feature in the master build? Check
-out the master docs at
-[docs.master.dockerproject.org](http://docs.master.dockerproject.org).
-
-How the project is run
-======================
-
-Docker is a very, very active project. If you want to learn more about how it is run,
-or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project).
-
-We are always open to suggestions on process improvements, and are always looking for more maintainers.
-
-### Talking to other Docker users and contributors
-
-Internet Relay Chat (IRC): IRC is a direct line to our most knowledgeable Docker users; we have
-both the #docker and #docker-dev group on irc.freenode.net. IRC is a rich chat protocol but it
-can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an
-easy way to get started.
-
-Docker Community Forums: The Docker Engine group is for users of the Docker Engine project.
-
-Google Groups: The docker-dev group is for contributors and other people contributing to the
-Docker project. You can join this group without a Google account by sending an email to
-docker-dev+subscribe@googlegroups.com. You'll receive a join-request message; simply reply to
-the message to confirm your subscription.
-
-Twitter: You can follow Docker's Twitter feed to get updates on our products. You can also
-tweet us questions or just share blogs or stories.
-
-Stack Overflow: Stack Overflow has over 7000 Docker questions listed. We regularly monitor
-Docker questions and so do many other knowledgeable Docker users.
- -### Legal - -*Brought to you courtesy of our legal counsel. For more context, -please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.* - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. - -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - - -Licensing -========= -Docker is licensed under the Apache License, Version 2.0. See -[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full -license text. - -Other Docker Related Projects -============================= -There are a number of projects under development that are based on Docker's -core technology. These projects expand the tooling built around the -Docker platform to broaden its application and utility. - -* [Docker Registry](https://github.com/docker/distribution): Registry -server for Docker (hosting/delivery of repositories and images) -* [Docker Machine](https://github.com/docker/machine): Machine management -for a container-centric world -* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering -system -* [Docker Compose](https://github.com/docker/compose) (formerly Fig): -Define and run multi-container apps -* [Kitematic](https://github.com/docker/kitematic): The easiest way to use -Docker on Mac and Windows - -If you know of another project underway that should be listed here, please help -us keep this list up-to-date by submitting a PR. - -Awesome-Docker -============== -You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. diff --git a/vendor/github.com/docker/docker/ROADMAP.md b/vendor/github.com/docker/docker/ROADMAP.md deleted file mode 100644 index 514fdb74..00000000 --- a/vendor/github.com/docker/docker/ROADMAP.md +++ /dev/null @@ -1,140 +0,0 @@ -Docker Engine Roadmap -===================== - -### How should I use this document? - -This document provides description of items that the project decided to prioritize. This should -serve as a reference point for Docker contributors to understand where the project is going, and -help determine if a contribution could be conflicting with some longer terms plans. - -The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be -refused (except for those mentioned as "frozen features" below)! We are always happy to receive -patches for new cool features we haven't thought about, or didn't judge priority. Please however -understand that such patches might take longer for us to review. - -### How can I help? - -Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described -in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our -goal is to split down the workload in such way that anybody can jump in and help. Please comment on -issues if you want to take it to avoid duplicating effort! Similarly, if a maintainer is already -assigned on an issue you'd like to participate in, pinging him on IRC or GitHub to offer your help is -the best way to go. - -### How can I add something to the roadmap? - -The roadmap process is new to the Docker Engine: we are only beginning to structure and document the -project objectives. 
Our immediate goal is to be more transparent, and to work with our community to -focus our efforts on fewer prioritized topics. - -We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but -we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we -won't be accepting pull requests adding or removing items from this file. - -# 1. Features and refactoring - -## 1.1 Runtime improvements - -We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container -execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional -default libcontainer `execdriver`, but the Engine internals were not ready for this. - -As runC continued evolving, and the OCI specification along with it, we created -[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC` instances. This is -the new target for Engine integration, as it can entirely replace the whole `execdriver` -architecture, and container monitoring along with it. - -Docker Engine will rely on a long-running `containerd` companion daemon for all container execution -related operations. This could open the door in the future to Engine restarts without interrupting -running containers. - -## 1.2 Plugins improvements - -Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volume and network -extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real-world -feedback before optimizing for any particular workflow. - -In the future, we'd like plugins to become first-class citizens, and to encourage an ecosystem of -plugins. This implies in particular making it trivially easy to distribute plugins as containers -through any Registry instance, as well as solving the commonly heard pain points of plugins needing -to be treated as somewhat special (being active at all times, started before any other user -containers, and not as easily dismissed). - -## 1.3 Internal decoupling - -A lot of work has been done to decouple the Docker Engine's internals. In particular, the -API implementation has been refactored, ongoing work is happening to move the code to a separate -repository ([`docker/engine-api`](https://github.com/docker/engine-api)), and the Builder side of -the daemon is now [fully independent](https://github.com/docker/docker/tree/master/builder) while -still residing in the same repository. - -We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the -runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support -with the concept of "special" containers opens the door to bootstrapping more Engine internals -using the same facilities. - -## 1.4 Cluster capable Engine - -The community has been pushing for a more cluster-capable Docker Engine, and a huge effort was spent -adding features such as multihost networking and node discovery down at the Engine level. Yet, the -Engine is currently incapable of making scheduling decisions alone, and continues relying on Swarm -for that. - -We plan to complete this effort and make the Engine fully cluster-capable. Multiple instances of the -Docker Engine are already capable of discovering each other and establishing overlay networking for -their containers to communicate; the next step is for a given Engine to gain the ability to dispatch work -to another node in the cluster.
This will be introduced in a backward-compatible way, such that a -`docker run` invocation on a particular node remains fully deterministic. - -# 2 Frozen features - -## 2.1 Docker exec - -We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a -*debugging* feature, and which is also strongly dependent on the runtime improvements effort. - -## 2.2 Dockerfile syntax - -The Dockerfile syntax as we know it is simple, and has proven successful in supporting all our -[official images](https://github.com/docker-library/official-images). Although this is *not* a -definitive move, we temporarily won't accept more patches to the Dockerfile syntax for several -reasons: - - - The long-term impact of syntax changes is a sensitive matter that requires more attention than the - volume of the Engine codebase and its activity today allows us to provide. - - Allowing the Builder to be implemented as a separate utility consuming the Engine's API will - open the door for many possibilities, such as offering alternate syntaxes or DSLs for existing - languages without cluttering the Engine's codebase. - - A standalone Builder will also offer the opportunity for a dedicated group of maintainers - to own the Dockerfile syntax and decide collectively on the direction to give it. - - Our experience with official images tends to show that no new instruction or syntax expansion is - *strictly* necessary for the majority of use cases, and although we are aware that many things are - still lacking for many users, we cannot make this a priority yet for the above reasons. - -Again, this is not about saying that the Dockerfile syntax is done, it's about making choices about -what we want to do first! - -## 2.3 Remote Registry Operations - -A large amount of work is ongoing in the area of image distribution and provenance. This includes -moving to the V2 Registry API and heavily refactoring the code that powers these features. The -desired result is more secure, reliable and easier-to-use image distribution. - -Part of the problem in this area of the code base is the lack of a stable and flexible interface. -If new features are added that access the registry without solidifying these interfaces, achieving -feature parity will continue to be elusive. While we get a handle on this situation, we are imposing -a moratorium on new code that accesses the Registry API in commands that don't already make remote -calls. - -Currently, only the following commands cause interaction with a remote registry: - - - push - - pull - - run - - build - - search - - login - -In the interest of stabilizing the registry access model during this ongoing work, we are not -accepting additions to other commands that will cause remote interaction with the Registry API. This -moratorium will lift when the goals of the distribution project have been met. diff --git a/vendor/github.com/docker/docker/VENDORING.md b/vendor/github.com/docker/docker/VENDORING.md deleted file mode 100644 index c6bb5086..00000000 --- a/vendor/github.com/docker/docker/VENDORING.md +++ /dev/null @@ -1,45 +0,0 @@ -# Vendoring policies - -This document outlines recommended vendoring policies for Docker repositories. -(For example, libnetwork is a Docker repo and logrus is not.) - -## Vendoring using tags - -Commit-ID-based vendoring provides little or no information about the updates -being vendored. To fix this, vendors will now require that repositories use annotated -tags along with commit IDs to snapshot commits.
Annotated tags by themselves -are not sufficient, since the same tag can be force-updated to reference -different commits. - -Each tag should: -- Follow Semantic Versioning rules (refer to the section on "Semantic Versioning") -- Have a corresponding entry in the change tracking document. - -Each repo should: -- Have a change tracking document between tags/releases, e.g. CHANGELOG.md or a -GitHub releases page. - -The goal here is for consuming repos to be able to use the tag version and -changelog updates to determine whether the vendoring will cause any breaking or -backward-incompatible changes. This also means that repos can declare a -dependency on a package at a specific version or greater, up to the next major -release, without encountering breaking changes. - -## Semantic Versioning -Annotated version tags should follow Semantic Versioning policies. -According to http://semver.org: - -"Given a version number MAJOR.MINOR.PATCH, increment the: - MAJOR version when you make incompatible API changes, - MINOR version when you add functionality in a backwards-compatible manner, and - PATCH version when you make backwards-compatible bug fixes. -Additional labels for pre-release and build metadata are available as extensions -to the MAJOR.MINOR.PATCH format." - -## Vendoring cadence -In order to avoid huge vendoring changes, it is recommended to have a regular -cadence for vendoring updates, e.g. monthly. - -## Pre-merge vendoring tests -All related repos will be vendored into docker/docker. -CI on docker/docker should catch any breaking changes involving multiple repos. diff --git a/vendor/github.com/docker/docker/VERSION b/vendor/github.com/docker/docker/VERSION deleted file mode 100644 index 381cf024..00000000 --- a/vendor/github.com/docker/docker/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.12.0-dev diff --git a/vendor/github.com/docker/docker/api/client/attach.go b/vendor/github.com/docker/docker/api/client/attach.go new file mode 100644 index 00000000..e89644d6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/attach.go @@ -0,0 +1,109 @@ +package client + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/signal" + "github.com/docker/engine-api/types" +) + +// CmdAttach attaches to a running container.
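+// Output is streamed back over a hijacked connection; for non-TTY sessions, signals received by the client are proxied to the container unless --sig-proxy=false.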
+// +// Usage: docker attach [OPTIONS] CONTAINER +func (cli *DockerCli) CmdAttach(args ...string) error { + cmd := Cli.Subcmd("attach", []string{"CONTAINER"}, Cli.DockerCommands["attach"].Description, true) + noStdin := cmd.Bool([]string{"-no-stdin"}, false, "Do not attach STDIN") + proxy := cmd.Bool([]string{"-sig-proxy"}, true, "Proxy all received signals to the process") + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + c, err := cli.client.ContainerInspect(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + if !c.State.Running { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + if c.State.Paused { + return fmt.Errorf("You cannot attach to a paused container, unpause it first") + } + + if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil { + return err + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: cmd.Arg(0), + Stream: true, + Stdin: !*noStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: cli.configFile.DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = cli.in + } + + if *proxy && !c.Config.Tty { + sigc := cli.forwardAllSignals(options.ContainerID) + defer signal.StopCatch(sigc) + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + defer resp.Close() + if in != nil && c.Config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + + if c.Config.Tty && cli.isTerminalOut { + height, width := cli.getTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + cli.resizeTtyTo(cmd.Arg(0), height+1, width+1, false) + + // After the above resizing occurs, the call to monitorTtySize below will handle resetting back + // to the actual size. 
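+ // The boolean argument distinguishes container attach sessions (false) from exec sessions (true; compare the call in exec.go).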
+ if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } + } + + if err := cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp); err != nil { + return err + } + + _, status, err := getExitCode(cli, options.ContainerID) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/build.go b/vendor/github.com/docker/docker/api/client/build.go new file mode 100644 index 00000000..cdba3fa5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/build.go @@ -0,0 +1,321 @@ +package client + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + + "golang.org/x/net/context" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" +) + +type translatorFunc func(reference.NamedTagged) (reference.Canonical, error) + +// CmdBuild builds a new image from the source code at a given path. +// +// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN. 
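+// The build context is archived client-side, honoring any .dockerignore exclusions, and streamed to the daemon as a tar archive.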
+// +// Usage: docker build [OPTIONS] PATH | URL | - +func (cli *DockerCli) CmdBuild(args ...string) error { + cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true) + flTags := opts.NewListOpts(validateTag) + cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the build output and print image ID on success") + noCache := cmd.Bool([]string{"-no-cache"}, false, "Do not use cache when building the image") + rm := cmd.Bool([]string{"-rm"}, true, "Remove intermediate containers after a successful build") + forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers") + pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") + dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flShmSize := cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB") + flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + flBuildArg := opts.NewListOpts(runconfigopts.ValidateEnv) + cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables") + isolation := cmd.String([]string{"-isolation"}, "", "Container isolation technology") + + flLabels := opts.NewListOpts(nil) + cmd.Var(&flLabels, []string{"-label"}, "Set metadata for an image") + + ulimits := make(map[string]*units.Ulimit) + flUlimits := runconfigopts.NewUlimitOpt(&ulimits) + cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") + + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + var ( + ctx io.ReadCloser + err error + ) + + specifiedContext := cmd.Arg(0) + + var ( + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + ) + + progBuff = cli.out + buildBuff = cli.out + if *suppressOutput { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + + switch { + case specifiedContext == "-": + ctx, relDockerfile, err = builder.GetContextFromReader(cli.in, *dockerfileName) + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, *dockerfileName) + case urlutil.IsURL(specifiedContext): + ctx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, *dockerfileName) + default: + contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, *dockerfileName) + } + + if err != nil { + if *suppressOutput && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(cli.err, progBuff) + } + return fmt.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + 
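+ // tempDir is only set for Git URL contexts; build from the local clone and let the deferred RemoveAll clean it up.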
contextDir = tempDir + } + + if ctx == nil { + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return err + } + + var excludes []string + if err == nil { + excludes, err = dockerignore.ReadAll(f) + if err != nil { + return err + } + } + + if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil { + return fmt.Errorf("Error checking context: '%s'.", err) + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. Ignore errors here, as they will have been + // caught by validateContextDirectory above. + var includes = []string{"."} + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(relDockerfile, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", relDockerfile) + } + + ctx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: excludes, + IncludeFiles: includes, + }) + if err != nil { + return err + } + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) + + var body io.Reader = progress.NewProgressReader(ctx, progressOutput, 0, "", "Sending build context to Docker daemon") + + var memory int64 + if *flMemoryString != "" { + parsedMemory, err := units.RAMInBytes(*flMemoryString) + if err != nil { + return err + } + memory = parsedMemory + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) + if err != nil { + return err + } + memorySwap = parsedMemorySwap + } + } + + var shmSize int64 + if *flShmSize != "" { + shmSize, err = units.RAMInBytes(*flShmSize) + if err != nil { + return err + } + } + + options := types.ImageBuildOptions{ + Context: body, + Memory: memory, + MemorySwap: memorySwap, + Tags: flTags.GetAll(), + SuppressOutput: *suppressOutput, + NoCache: *noCache, + Remove: *rm, + ForceRemove: *forceRm, + PullParent: *pull, + Isolation: container.Isolation(*isolation), + CPUSetCPUs: *flCPUSetCpus, + CPUSetMems: *flCPUSetMems, + CPUShares: *flCPUShares, + CPUQuota: *flCPUQuota, + CPUPeriod: *flCPUPeriod, + CgroupParent: *flCgroupParent, + Dockerfile: relDockerfile, + ShmSize: shmSize, + Ulimits: flUlimits.GetList(), + BuildArgs: runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()), + AuthConfigs: cli.retrieveAuthConfigs(), + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + response, err := cli.client.ImageBuild(context.Background(), options) + if err != nil { + return err + } + defer response.Body.Close() + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if *suppressOutput { + fmt.Fprintf(cli.err,
"%s%s", progBuff, buildBuff) + } + return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" { + fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if *suppressOutput { + fmt.Fprintf(cli.out, "%s", buildBuff) + } + + return nil +} + +// validateTag checks if the given image name can be resolved. +func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + var content io.Reader = tarReader + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/vendor/github.com/docker/docker/api/client/cli.go b/vendor/github.com/docker/docker/api/client/cli.go new file mode 100644 index 00000000..6c673da4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/cli.go @@ -0,0 +1,215 @@ +package client + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/credentials" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/client" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + // initializing closure + init func() error + + // configFile has the client configuration file + configFile *cliconfig.ConfigFile + // in holds the input stream and closer (io.ReadCloser) for the client.
+ in io.ReadCloser + // out holds the output stream (io.Writer) for the client. + out io.Writer + // err holds the error stream (io.Writer) for the client. + err io.Writer + // keyFile holds the key file as a string. + keyFile string + // inFd holds the file descriptor of the client's STDIN (if valid). + inFd uintptr + // outFd holds the file descriptor of the client's STDOUT (if valid). + outFd uintptr + // isTerminalIn indicates whether the client's STDIN is a TTY + isTerminalIn bool + // isTerminalOut indicates whether the client's STDOUT is a TTY + isTerminalOut bool + // client is the http client that performs all API operations + client client.APIClient + // state holds the terminal state + state *term.State +} + +// Initialize calls the init function that will set up the configuration for the client +// such as the TLS, tcp and other parameters used to run the client. +func (cli *DockerCli) Initialize() error { + if cli.init == nil { + return nil + } + return cli.init() +} + +// CheckTtyInput checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !cli.isTerminalIn { + return errors.New("cannot enable tty mode on non tty input") + } + return nil +} + +// PsFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}. +func (cli *DockerCli) PsFormat() string { + return cli.configFile.PsFormat +} + +// ImagesFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}. +func (cli *DockerCli) ImagesFormat() string { + return cli.configFile.ImagesFormat +} + +func (cli *DockerCli) setRawTerminal() error { + if cli.isTerminalIn && os.Getenv("NORAW") == "" { + state, err := term.SetRawTerminal(cli.inFd) + if err != nil { + return err + } + cli.state = state + } + return nil +} + +func (cli *DockerCli) restoreTerminal(in io.Closer) error { + if cli.state != nil { + term.RestoreTerminal(cli.inFd, cli.state) + } + // WARNING: DO NOT REMOVE THE OS CHECK !!! + // For some reason this Close call blocks on darwin.. + // As the client exits right after, simply discard the close + // until we find a better solution. + if in != nil && runtime.GOOS != "darwin" { + return in.Close() + } + return nil +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config +// is set the client scheme will be set to https. +// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035).
+func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli { + cli := &DockerCli{ + in: in, + out: out, + err: err, + keyFile: clientFlags.Common.TrustKey, + } + + cli.init = func() error { + clientFlags.PostParse() + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e) + } + if !configFile.ContainsAuth() { + credentials.DetectDefaultStore(configFile) + } + cli.configFile = configFile + + host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions) + if err != nil { + return err + } + + customHeaders := cli.configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = clientUserAgent() + + verStr := api.DefaultVersion.String() + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + httpClient, err := newHTTPClient(host, clientFlags.Common.TLSOptions) + if err != nil { + return err + } + + client, err := client.NewClient(host, verStr, httpClient, customHeaders) + if err != nil { + return err + } + cli.client = client + + if cli.in != nil { + cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in) + } + if cli.out != nil { + cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out) + } + + return nil + } + + return cli +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + host, err = opts.ParseHost(tlsOptions != nil, host) + return +} + +func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { + if tlsOptions == nil { + // let the api client configure the default transport. + return nil, nil + } + + config, err := tlsconfig.Client(*tlsOptions) + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(host) + if err != nil { + return nil, err + } + + sockets.ConfigureTransport(tr, proto, addr) + + return &http.Client{ + Transport: tr, + }, nil +} + +func clientUserAgent() string { + return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" +} diff --git a/vendor/github.com/docker/docker/api/client/client.go b/vendor/github.com/docker/docker/api/client/client.go new file mode 100644 index 00000000..4cfce5f6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/client.go @@ -0,0 +1,5 @@ +// Package client provides a command-line interface for Docker. +// +// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. +// See https://docs.docker.com/installation/ for instructions on installing Docker. 
+package client diff --git a/vendor/github.com/docker/docker/api/client/commit.go b/vendor/github.com/docker/docker/api/client/commit.go new file mode 100644 index 00000000..6bec4297 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/commit.go @@ -0,0 +1,85 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" +) + +// CmdCommit creates a new image from a container's changes. +// +// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, Cli.DockerCommands["commit"].Description, true) + flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")") + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. + flConfig := cmd.String([]string{"#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") + cmd.Require(flag.Max, 2) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + name = cmd.Arg(0) + repositoryAndTag = cmd.Arg(1) + repositoryName string + tag string + ) + + //Check if the given image name can be resolved + if repositoryAndTag != "" { + ref, err := reference.ParseNamed(repositoryAndTag) + if err != nil { + return err + } + + repositoryName = ref.Name() + + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot commit to digest reference") + case reference.NamedTagged: + tag = x.Tag() + } + } + + var config *container.Config + if *flConfig != "" { + config = &container.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + + options := types.ContainerCommitOptions{ + ContainerID: name, + RepositoryName: repositoryName, + Tag: tag, + Comment: *flComment, + Author: *flAuthor, + Changes: flChanges.GetAll(), + Pause: *flPause, + Config: config, + } + + response, err := cli.client.ContainerCommit(context.Background(), options) + if err != nil { + return err + } + + fmt.Fprintln(cli.out, response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/cp.go b/vendor/github.com/docker/docker/api/client/cp.go new file mode 100644 index 00000000..61005602 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/cp.go @@ -0,0 +1,298 @@ +package client + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/system" + "github.com/docker/engine-api/types" +) + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool +} + +// CmdCp copies files/folders to or from a path in a container.
+// +// When copying from a container, if DEST_PATH is '-' the data is written as a +// tar archive file to STDOUT. +// +// When copying to a container, if SRC_PATH is '-' the data is read as a tar +// archive file from STDIN, and the destination CONTAINER:DEST_PATH must specify +// a directory. +// +// Usage: +// docker cp CONTAINER:SRC_PATH DEST_PATH|- +// docker cp SRC_PATH|- CONTAINER:DEST_PATH +func (cli *DockerCli) CmdCp(args ...string) error { + cmd := Cli.Subcmd( + "cp", + []string{"CONTAINER:SRC_PATH DEST_PATH|-", "SRC_PATH|- CONTAINER:DEST_PATH"}, + strings.Join([]string{ + Cli.DockerCommands["cp"].Description, + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + true, + ) + + followLink := cmd.Bool([]string{"L", "-follow-link"}, false, "Always follow symbolic link in SRC_PATH") + + cmd.Require(flag.Exact, 2) + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("source can not be empty") + } + if cmd.Arg(1) == "" { + return fmt.Errorf("destination can not be empty") + } + + srcContainer, srcPath := splitCpArg(cmd.Arg(0)) + dstContainer, dstPath := splitCpArg(cmd.Arg(1)) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + cpParam := &cpConfig{ + followLink: *followLink, + } + + switch direction { + case fromContainer: + return cli.copyFromContainer(srcContainer, srcPath, dstPath, cpParam) + case toContainer: + return cli.copyToContainer(srcPath, dstContainer, dstPath, cpParam) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately preceded by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`.
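+ // For example, splitCpArg("mycontainer:/tmp/foo") yields ("mycontainer", "/tmp/foo"),
+ // while splitCpArg("./file:name.txt") yields ("", "./file:name.txt").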
+ return "", arg + } + + return parts[0], parts[1] +} + +func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) { + return cli.client.ContainerStatPath(context.Background(), containerName, path) +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { + if dstPath != "-" { + // Get an absolute destination path. + dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + // If the client requests to follow symbolic links, we must decide which target file to copy. + var rebaseName string + if cpParam.followLink { + srcStat, err := cli.statContainerPath(srcContainer, srcPath) + + // If the source is a symbolic link, we should follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := cli.client.CopyFromContainer(context.Background(), srcContainer, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, content) + + return err + } + + // Prepare source copy info. + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { + if srcPath != "-" { + // Get an absolute source path. + srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := cli.statContainerPath(dstContainer, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = cli.statContainerPath(dstContainer, linkTarget) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed.
If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + ContainerID: dstContainer, + Path: resolvedDstPath, + Content: content, + AllowOverwriteDirWithFile: false, + } + + return cli.client.CopyToContainer(context.Background(), options) +} diff --git a/vendor/github.com/docker/docker/api/client/create.go b/vendor/github.com/docker/docker/api/client/create.go new file mode 100644 index 00000000..176bd8e9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/create.go @@ -0,0 +1,164 @@ +package client + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + var tag string + switch x := reference.WithDefaultTag(ref).(type) { + case reference.Canonical: + tag = x.Digest().String() + case reference.NamedTagged: + tag = x.Tag() + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(repoInfo.Index) + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := 
types.ImageCreateOptions{ + Parent: ref.Name(), + Tag: tag, + RegistryAuth: encodedAuth, + } + + responseBody, err := cli.client.ImageCreate(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, out, cli.outFd, cli.isTerminalOut, nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func (cli *DockerCli) createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + _, ref, err := reference.ParseIDOrReference(config.Image) + if err != nil { + return nil, err + } + if ref != nil { + ref = reference.WithDefaultTag(ref) + } + + //create the container + response, err := cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name) + + //if image not found try to pull it + if err != nil { + if client.IsErrImageNotFound(err) && ref != nil { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String()) + + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + return nil, err + } + // Retry + var retryErr error + response, retryErr = cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} + +// CmdCreate creates a new container from a given image. +// +// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] 
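+// If the image is not found locally it is pulled first (with progress written to stderr) and the create is retried once.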
+func (cli *DockerCli) CmdCreate(args ...string) error { + cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["create"].Description, true) + + // These are flags not stored in Config/HostConfig + var ( + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + ) + + config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) + + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(1) + } + if config.Image == "" { + cmd.Usage() + return nil + } + response, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/diff.go b/vendor/github.com/docker/docker/api/client/diff.go new file mode 100644 index 00000000..e17768fd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/diff.go @@ -0,0 +1,49 @@ +package client + +import ( + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdDiff shows changes on a container's filesystem. +// +// Each changed file is printed on a separate line, prefixed with a single +// character that indicates the status of the file: C (modified), A (added), +// or D (deleted). +// +// Usage: docker diff CONTAINER +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, Cli.DockerCommands["diff"].Description, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("Container name cannot be empty") + } + + changes, err := cli.client.ContainerDiff(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/events.go b/vendor/github.com/docker/docker/api/client/events.go new file mode 100644 index 00000000..d2408c19 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/events.go @@ -0,0 +1,146 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + eventtypes "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// CmdEvents prints a live stream of real time events from the server. 
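+// Events are decoded from the daemon's JSON stream and printed one per line until the stream is closed.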
+// +// Usage: docker events [OPTIONS] +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := Cli.Subcmd("events", nil, Cli.DockerCommands["events"].Description, true) + since := cmd.String([]string{"-since"}, "", "Show all events created since timestamp") + until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + eventFilterArgs := filters.NewArgs() + + // Consolidate all filter flags, and sanity check them early. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + var err error + eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) + if err != nil { + return err + } + } + + options := types.EventsOptions{ + Since: *since, + Until: *until, + Filters: eventFilterArgs, + } + + responseBody, err := cli.client.Events(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + return streamEvents(responseBody, cli.out) +} + +// streamEvents decodes and prints the incoming events to the provided output. +func streamEvents(input io.Reader, output io.Writer) error { + return decodeEvents(input, func(event eventtypes.Message, err error) error { + if err != nil { + return err + } + printOutput(event, output) + return nil + }) +} + +type eventProcessor func(event eventtypes.Message, err error) error + +func decodeEvents(input io.Reader, ep eventProcessor) error { + dec := json.NewDecoder(input) + for { + var event eventtypes.Message + err := dec.Decode(&event) + if err != nil && err == io.EOF { + break + } + + if procErr := ep(event, err); procErr != nil { + return procErr + } + } + return nil +} + +// printOutput prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. +func printOutput(event eventtypes.Message, output io.Writer) { + if event.TimeNano != 0 { + fmt.Fprintf(output, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(output, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + + fmt.Fprintf(output, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + var keys []string + for k := range event.Actor.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := event.Actor.Attributes[k] + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(output, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(output, "\n") +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} + +func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan.
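+// Each matching handler is invoked in its own goroutine, so handlers must be safe for concurrent use.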
+func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/docker/docker/api/client/exec.go b/vendor/github.com/docker/docker/api/client/exec.go new file mode 100644 index 00000000..520c3a38 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/exec.go @@ -0,0 +1,166 @@ +package client + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/engine-api/types" +) + +// CmdExec runs a command in a running container. +// +// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] +func (cli *DockerCli) CmdExec(args ...string) error { + cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + + execConfig, err := ParseExec(cmd, args) + // ParseExec normally exits on bad usage; check the error first so a nil execConfig is never dereferenced + if err != nil || execConfig.Container == "" { + return Cli.StatusError{StatusCode: 1} + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + // Send client escape keys + execConfig.DetachKeys = cli.configFile.DetachKeys + + response, err := cli.client.ContainerExecCreate(context.Background(), *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + fmt.Fprintf(cli.out, "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + if !execConfig.Detach { + if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if err := cli.client.ContainerExecStart(context.Background(), execID, execStartCheck); err != nil { + return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(cli.out, "%s\n", execID) + return nil + } + + // Interactive exec requested.
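+ // Wire the client's streams to the exec'd process: stdin only when requested, and stderr folded into stdout when a TTY is allocated.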
+ var ( + out, stderr io.Writer + in io.ReadCloser + errCh chan error + ) + + if execConfig.AttachStdin { + in = cli.in + } + if execConfig.AttachStdout { + out = cli.out + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + resp, err := cli.client.ContainerExecAttach(context.Background(), execID, *execConfig) + if err != nil { + return err + } + defer resp.Close() + if in != nil && execConfig.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + errCh = promise.Go(func() error { + return cli.holdHijackedConnection(execConfig.Tty, in, out, stderr, resp) + }) + + if execConfig.Tty && cli.isTerminalIn { + if err := cli.monitorTtySize(execID, true); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(cli, execID); err != nil { + return err + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} + +// ParseExec parses the specified args for the specified command and generates +// an ExecConfig from it. +// If the minimal number of specified args is not right or if specified args are +// not valid, it will return an error. +func ParseExec(cmd *flag.FlagSet, args []string) (*types.ExecConfig, error) { + var ( + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])") + flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") + execCmd []string + container string + ) + cmd.Require(flag.Min, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return nil, err + } + container = cmd.Arg(0) + parsedArgs := cmd.Args() + execCmd = parsedArgs[1:] + + execConfig := &types.ExecConfig{ + User: *flUser, + Privileged: *flPrivileged, + Tty: *flTty, + Cmd: execCmd, + Container: container, + Detach: *flDetach, + } + + // If -d is not set, attach to everything by default + if !*flDetach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if *flStdin { + execConfig.AttachStdin = true + } + } + + return execConfig, nil +} diff --git a/vendor/github.com/docker/docker/api/client/export.go b/vendor/github.com/docker/docker/api/client/export.go new file mode 100644 index 00000000..a1d3ebe7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/export.go @@ -0,0 +1,42 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdExport exports a filesystem as a tar archive. +// +// The tar archive is streamed to STDOUT by default or written to a file. +// +// Usage: docker export [OPTIONS] CONTAINER +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := Cli.Subcmd("export", []string{"CONTAINER"}, Cli.DockerCommands["export"].Description, true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if *outfile == "" && cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal.
Use the -o flag or redirect.") + } + + responseBody, err := cli.client.ContainerExport(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + defer responseBody.Close() + + if *outfile == "" { + _, err := io.Copy(cli.out, responseBody) + return err + } + + return copyToFile(*outfile, responseBody) + +} diff --git a/vendor/github.com/docker/docker/api/client/formatter/custom.go b/vendor/github.com/docker/docker/api/client/formatter/custom.go new file mode 100644 index 00000000..2bb26a3d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/formatter/custom.go @@ -0,0 +1,242 @@ +package formatter + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/engine-api/types" + "github.com/docker/go-units" +) + +const ( + tableKey = "table" + + containerIDHeader = "CONTAINER ID" + imageHeader = "IMAGE" + namesHeader = "NAMES" + commandHeader = "COMMAND" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" + mountsHeader = "MOUNTS" +) + +type containerContext struct { + baseSubContext + trunc bool + c types.Container +} + +func (c *containerContext) ID() string { + c.addHeader(containerIDHeader) + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.addHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.addHeader(imageHeader) + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.addHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Truncate(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.addHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.addHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.addHeader(statusHeader) + return c.c.Status +} + +func (c *containerContext) Size() string { + c.addHeader(sizeHeader) + srw := units.HumanSize(float64(c.c.SizeRw)) + sv := units.HumanSize(float64(c.c.SizeRootFs)) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.addHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := 
strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.addHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + c.addHeader(mountsHeader) + + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = stringutils.Truncate(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +type imageContext struct { + baseSubContext + trunc bool + i types.Image + repo string + tag string + digest string +} + +func (c *imageContext) ID() string { + c.addHeader(imageIDHeader) + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + c.addHeader(repositoryHeader) + return c.repo +} + +func (c *imageContext) Tag() string { + c.addHeader(tagHeader) + return c.tag +} + +func (c *imageContext) Digest() string { + c.addHeader(digestHeader) + return c.digest +} + +func (c *imageContext) CreatedSince() string { + c.addHeader(createdSinceHeader) + createdAt := time.Unix(int64(c.i.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *imageContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.i.Created), 0).String() +} + +func (c *imageContext) Size() string { + c.addHeader(sizeHeader) + return units.HumanSize(float64(c.i.Size)) +} + +type subContext interface { + fullHeader() string + addHeader(header string) +} + +type baseSubContext struct { + header []string +} + +func (c *baseSubContext) fullHeader() string { + if c.header == nil { + return "" + } + return strings.Join(c.header, "\t") +} + +func (c *baseSubContext) addHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func stripNamePrefix(ss []string) []string { + for i, s := range ss { + ss[i] = s[1:] + } + + return ss +} diff --git a/vendor/github.com/docker/docker/api/client/formatter/formatter.go b/vendor/github.com/docker/docker/api/client/formatter/formatter.go new file mode 100644 index 00000000..bc3d50c9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/formatter/formatter.go @@ -0,0 +1,255 @@ +package formatter + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/docker/reference" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/types" +) + +const ( + tableFormatKey = "table" + rawFormatKey = "raw" + + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultQuietFormat = "{{.ID}}" +) + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format string + // Quiet when set to true will simply print minimal information. + Quiet bool + // Trunc when set to true will truncate the output of certain fields such as Container ID. 
+ Trunc bool + + // internal element + table bool + finalFormat string + header string + buffer *bytes.Buffer +} + +func (c *Context) preformat() { + c.finalFormat = c.Format + + if strings.HasPrefix(c.Format, tableKey) { + c.table = true + c.finalFormat = c.finalFormat[len(tableKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + c.buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) + c.buffer.WriteTo(c.Output) + } + return tmpl, err +} + +func (c *Context) postformat(tmpl *template.Template, subContext subContext) { + if c.table { + if len(c.header) == 0 { + // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template + tmpl.Execute(bytes.NewBufferString(""), subContext) + c.header = subContext.fullHeader() + } + + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(c.header)) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + c.buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) + c.buffer.WriteTo(c.Output) + return err + } + if c.table && len(c.header) == 0 { + c.header = subContext.fullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// ContainerContext contains container specific information required by the formater, encapsulate a Context struct. +type ContainerContext struct { + Context + // Size when set to true will display the size of the output. + Size bool + // Containers + Containers []types.Container +} + +// ImageContext contains image specific information required by the formater, encapsulate a Context struct. 
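+//
+// A sketch of the intended call pattern, mirroring CmdImages in
+// images.go (os.Stdout and the images slice stand in for the CLI's
+// output stream and a client.ImageList result):
+//
+//	ctx := formatter.ImageContext{
+//		Context: formatter.Context{
+//			Output: os.Stdout,
+//			Format: "table",
+//			Trunc:  true,
+//		},
+//		Images: images,
+//	}
+//	ctx.Write()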
+type ImageContext struct {
+ Context
+ Digest bool
+ // Images
+ Images []types.Image
+}
+
+func (ctx ContainerContext) Write() {
+ switch ctx.Format {
+ case tableFormatKey:
+ ctx.Format = defaultContainerTableFormat
+ if ctx.Quiet {
+ ctx.Format = defaultQuietFormat
+ }
+ case rawFormatKey:
+ if ctx.Quiet {
+ ctx.Format = `container_id: {{.ID}}`
+ } else {
+ ctx.Format = `container_id: {{.ID}}
+image: {{.Image}}
+command: {{.Command}}
+created_at: {{.CreatedAt}}
+status: {{.Status}}
+names: {{.Names}}
+labels: {{.Labels}}
+ports: {{.Ports}}
+`
+ if ctx.Size {
+ ctx.Format += `size: {{.Size}}
+`
+ }
+ }
+ }
+
+ ctx.buffer = bytes.NewBufferString("")
+ ctx.preformat()
+ if ctx.table && ctx.Size {
+ ctx.finalFormat += "\t{{.Size}}"
+ }
+
+ tmpl, err := ctx.parseFormat()
+ if err != nil {
+ return
+ }
+
+ for _, container := range ctx.Containers {
+ containerCtx := &containerContext{
+ trunc: ctx.Trunc,
+ c: container,
+ }
+ err = ctx.contextFormat(tmpl, containerCtx)
+ if err != nil {
+ return
+ }
+ }
+
+ ctx.postformat(tmpl, &containerContext{})
+}
+
+func (ctx ImageContext) Write() {
+ switch ctx.Format {
+ case tableFormatKey:
+ ctx.Format = defaultImageTableFormat
+ if ctx.Digest {
+ ctx.Format = defaultImageTableFormatWithDigest
+ }
+ if ctx.Quiet {
+ ctx.Format = defaultQuietFormat
+ }
+ case rawFormatKey:
+ if ctx.Quiet {
+ ctx.Format = `image_id: {{.ID}}`
+ } else {
+ if ctx.Digest {
+ ctx.Format = `repository: {{ .Repository }}
+tag: {{.Tag}}
+digest: {{.Digest}}
+image_id: {{.ID}}
+created_at: {{.CreatedAt}}
+virtual_size: {{.Size}}
+`
+ } else {
+ ctx.Format = `repository: {{ .Repository }}
+tag: {{.Tag}}
+image_id: {{.ID}}
+created_at: {{.CreatedAt}}
+virtual_size: {{.Size}}
+`
+ }
+ }
+ }
+
+ ctx.buffer = bytes.NewBufferString("")
+ ctx.preformat()
+ if ctx.table && ctx.Digest && !strings.Contains(ctx.Format, "{{.Digest}}") {
+ ctx.finalFormat += "\t{{.Digest}}"
+ }
+
+ tmpl, err := ctx.parseFormat()
+ if err != nil {
+ return
+ }
+
+ for _, image := range ctx.Images {
+
+ repoTags := image.RepoTags
+ repoDigests := image.RepoDigests
+
+ if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
+ // dangling image - clear out either repoTags or repoDigests so we only show it once below
+ repoDigests = []string{}
+ }
+ // combine the tags and digests lists
+ tagsAndDigests := append(repoTags, repoDigests...)
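+ // Each entry is either "name:tag" or "name@digest"; reference.ParseNamed
+ // below tells the two apart via the reference.NamedTagged and
+ // reference.Canonical interfaces.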
+ for _, repoAndRef := range tagsAndDigests {
+ repo := ""
+ tag := ""
+ digest := ""
+
+ if !strings.HasPrefix(repoAndRef, "<none>") {
+ ref, err := reference.ParseNamed(repoAndRef)
+ if err != nil {
+ continue
+ }
+ repo = ref.Name()
+
+ switch x := ref.(type) {
+ case reference.Canonical:
+ digest = x.Digest().String()
+ case reference.NamedTagged:
+ tag = x.Tag()
+ }
+ }
+ imageCtx := &imageContext{
+ trunc: ctx.Trunc,
+ i: image,
+ repo: repo,
+ tag: tag,
+ digest: digest,
+ }
+ err = ctx.contextFormat(tmpl, imageCtx)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ ctx.postformat(tmpl, &imageContext{})
+} diff --git a/vendor/github.com/docker/docker/api/client/hijack.go b/vendor/github.com/docker/docker/api/client/hijack.go new file mode 100644 index 00000000..4c80fe1c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/hijack.go @@ -0,0 +1,56 @@ +package client
+
+import (
+ "io"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/docker/engine-api/types"
+)
+
+func (cli *DockerCli) holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error {
+ var err error
+ receiveStdout := make(chan error, 1)
+ if outputStream != nil || errorStream != nil {
+ go func() {
+ // When TTY is ON, use regular copy
+ if tty && outputStream != nil {
+ _, err = io.Copy(outputStream, resp.Reader)
+ } else {
+ _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
+ }
+ logrus.Debugf("[hijack] End of stdout")
+ receiveStdout <- err
+ }()
+ }
+
+ stdinDone := make(chan struct{})
+ go func() {
+ if inputStream != nil {
+ io.Copy(resp.Conn, inputStream)
+ logrus.Debugf("[hijack] End of stdin")
+ }
+
+ if err := resp.CloseWrite(); err != nil {
+ logrus.Debugf("Couldn't send EOF: %s", err)
+ }
+ close(stdinDone)
+ }()
+
+ select {
+ case err := <-receiveStdout:
+ if err != nil {
+ logrus.Debugf("Error receiveStdout: %s", err)
+ return err
+ }
+ case <-stdinDone:
+ if outputStream != nil || errorStream != nil {
+ if err := <-receiveStdout; err != nil {
+ logrus.Debugf("Error receiveStdout: %s", err)
+ return err
+ }
+ }
+ }
+
+ return nil
+} diff --git a/vendor/github.com/docker/docker/api/client/history.go b/vendor/github.com/docker/docker/api/client/history.go new file mode 100644 index 00000000..25bb4157 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/history.go @@ -0,0 +1,76 @@ +package client
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ "golang.org/x/net/context"
+
+ Cli "github.com/docker/docker/cli"
+ flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/pkg/stringutils"
+ "github.com/docker/go-units"
+)
+
+// CmdHistory shows the history of an image.
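+// Each history entry describes one image layer, as returned by the
+// daemon (most recent layer first).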
+// +// Usage: docker history [OPTIONS] IMAGE +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true) + human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + history, err := cli.client.ImageHistory(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + + if *quiet { + for _, entry := range history { + if *noTrunc { + fmt.Fprintf(w, "%s\n", entry.ID) + } else { + fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) + } + } + w.Flush() + return nil + } + + var imageID string + var createdBy string + var created string + var size string + + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + for _, entry := range history { + imageID = entry.ID + createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) + if *noTrunc == false { + createdBy = stringutils.Truncate(createdBy, 45) + imageID = stringid.TruncateID(entry.ID) + } + + if *human { + created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" + size = units.HumanSize(float64(entry.Size)) + } else { + created = time.Unix(entry.Created, 0).Format(time.RFC3339) + size = strconv.FormatInt(entry.Size, 10) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/images.go b/vendor/github.com/docker/docker/api/client/images.go new file mode 100644 index 00000000..4840b63d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/images.go @@ -0,0 +1,81 @@ +package client + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/formatter" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified. +// +// Usage: docker images [OPTIONS] [REPOSITORY] +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true) + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") + format := cmd.String([]string{"-format"}, "", "Pretty-print images using a Go template") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + // Consolidate all filter flags, and sanity check them early. + // They'll get process in the daemon/server. 
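+ // Each -f/--filter value is a "key=value" pair, for example
+ // "dangling=true" or "label=maintainer=someone"; ParseFlag folds them
+ // all into a single filters.Args set for the request.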
+ imageFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + + var matchName string + if cmd.NArg() == 1 { + matchName = cmd.Arg(0) + } + + options := types.ImageListOptions{ + MatchName: matchName, + All: *all, + Filters: imageFilterArgs, + } + + images, err := cli.client.ImageList(context.Background(), options) + if err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.ImagesFormat()) > 0 && !*quiet { + f = cli.ImagesFormat() + } else { + f = "table" + } + } + + imagesCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Trunc: !*noTrunc, + }, + Digest: *showDigests, + Images: images, + } + + imagesCtx.Write() + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/import.go b/vendor/github.com/docker/docker/api/client/import.go new file mode 100644 index 00000000..c96e1e97 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/import.go @@ -0,0 +1,82 @@ +package client + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image. +// +// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to local file relative to docker client. If the URL is '-', then the tar file is read from STDIN. +// +// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true) + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + in io.Reader + tag string + src = cmd.Arg(0) + srcName = src + repository = cmd.Arg(1) + changes = flChanges.GetAll() + ) + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. 
Please use file|URL|- [REPOSITORY[:TAG]]\n") + tag = cmd.Arg(2) + } + + if repository != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(repository); err != nil { + return err + } + } + + if src == "-" { + in = cli.in + } else if !urlutil.IsURL(src) { + srcName = "-" + file, err := os.Open(src) + if err != nil { + return err + } + defer file.Close() + in = file + } + + options := types.ImageImportOptions{ + Source: in, + SourceName: srcName, + RepositoryName: repository, + Message: *message, + Tag: tag, + Changes: changes, + } + + responseBody, err := cli.client.ImageImport(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} diff --git a/vendor/github.com/docker/docker/api/client/info.go b/vendor/github.com/docker/docker/api/client/info.go new file mode 100644 index 00000000..29df605e --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/info.go @@ -0,0 +1,155 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/ioutils" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" + "github.com/docker/go-units" +) + +// CmdInfo displays system-wide information. +// +// Usage: docker info +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true) + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + info, err := cli.client.Info(context.Background()) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) + fmt.Fprintf(cli.out, " Running: %d\n", info.ContainersRunning) + fmt.Fprintf(cli.out, " Paused: %d\n", info.ContainersPaused) + fmt.Fprintf(cli.out, " Stopped: %d\n", info.ContainersStopped) + fmt.Fprintf(cli.out, "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + + // print a warning if devicemapper is using a loopback file + if pair[0] == "Data loop file" { + fmt.Fprintln(cli.err, " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Either use `--storage-opt dm.thinpooldev` or use `--storage-opt dm.no_warn_on_loop_devices=true` to suppress this warning.") + } + } + + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(cli.out, "%s: %s\n", pair[0], pair[1]) + } + } + ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Cgroup Driver: %s\n", info.CgroupDriver) + + fmt.Fprintf(cli.out, "Plugins: \n") + fmt.Fprintf(cli.out, " Volume:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintf(cli.out, "\n") + fmt.Fprintf(cli.out, " Network:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Network, " ")) + fmt.Fprintf(cli.out, "\n") + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintf(cli.out, " Authorization:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Authorization, " ")) + fmt.Fprintf(cli.out, "\n") + } + + ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) + ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType) + ioutils.FprintfIfNotEmpty(cli.out, "Architecture: %s\n", info.Architecture) + fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) + fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir) + fmt.Fprintf(cli.out, "Debug mode (client): %v\n", utils.IsDebugEnabled()) + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) + + if info.Debug { + fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime) + fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener) + } + + ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HTTPProxy) + ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy) + ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + } + fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) + } + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + if !info.MemoryLimit { + fmt.Fprintln(cli.err, "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(cli.err, "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(cli.err, "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(cli.err, "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(cli.err, "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(cli.err, "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(cli.err, "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(cli.err, "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(cli.err, "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-iptables is 
disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + if info.Labels != nil { + fmt.Fprintln(cli.out, "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(cli.out, " %s\n", attribute) + } + } + + ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(cli.out, "Cluster store: %s\n", info.ClusterStore) + } + + if info.ClusterAdvertise != "" { + fmt.Fprintf(cli.out, "Cluster advertise: %s\n", info.ClusterAdvertise) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/inspect.go b/vendor/github.com/docker/docker/api/client/inspect.go new file mode 100644 index 00000000..2e97a5aa --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect.go @@ -0,0 +1,127 @@ +package client + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/inspect" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/client" +) + +// CmdInspect displays low-level information on one or more containers or images. +// +// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") + size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *inspectType != "" && *inspectType != "container" && *inspectType != "image" { + return fmt.Errorf("%q is not a valid value for --type", *inspectType) + } + + var elementSearcher inspectSearcher + switch *inspectType { + case "container": + elementSearcher = cli.inspectContainers(*size) + case "image": + elementSearcher = cli.inspectImages(*size) + default: + elementSearcher = cli.inspectAll(*size) + } + + return cli.inspectElements(*tmplStr, cmd.Args(), elementSearcher) +} + +func (cli *DockerCli) inspectContainers(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + return cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize) + } +} + +func (cli *DockerCli) inspectImages(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + return cli.client.ImageInspectWithRaw(context.Background(), ref, getSize) + } +} + +func (cli *DockerCli) inspectAll(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + c, rawContainer, err := cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize) + if err != nil { + // Search for image with that id if a container doesn't exist. 
+ if client.IsErrContainerNotFound(err) { + i, rawImage, err := cli.client.ImageInspectWithRaw(context.Background(), ref, getSize) + if err != nil { + if client.IsErrImageNotFound(err) { + return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref) + } + return nil, nil, err + } + return i, rawImage, err + } + return nil, nil, err + } + return c, rawContainer, err + } +} + +type inspectSearcher func(ref string) (interface{}, []byte, error) + +func (cli *DockerCli) inspectElements(tmplStr string, references []string, searchByReference inspectSearcher) error { + elementInspector, err := cli.newInspectorWithTemplate(tmplStr) + if err != nil { + return Cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErr error + for _, ref := range references { + element, raw, err := searchByReference(ref) + if err != nil { + inspectErr = err + break + } + + if err := elementInspector.Inspect(element, raw); err != nil { + inspectErr = err + break + } + } + + if err := elementInspector.Flush(); err != nil { + cli.inspectErrorStatus(err) + } + + if status := cli.inspectErrorStatus(inspectErr); status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) inspectErrorStatus(err error) (status int) { + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + } + return +} + +func (cli *DockerCli) newInspectorWithTemplate(tmplStr string) (inspect.Inspector, error) { + elementInspector := inspect.NewIndentedInspector(cli.out) + if tmplStr != "" { + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, fmt.Errorf("Template parsing error: %s", err) + } + elementInspector = inspect.NewTemplateInspector(cli.out, tmpl) + } + return elementInspector, nil +} diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector.go b/vendor/github.com/docker/docker/api/client/inspect/inspector.go new file mode 100644 index 00000000..a1d16d47 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect/inspector.go @@ -0,0 +1,119 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/template" +) + +// Inspector defines an interface to implement to process elements +type Inspector interface { + Inspect(typedElement interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. +func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { + buffer := new(bytes.Buffer) + if err := i.tmpl.Execute(buffer, typedElement); err != nil { + if rawElement == nil { + return fmt.Errorf("Template parsing error: %v", err) + } + return i.tryRawInspectFallback(rawElement, err) + } + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// Flush write the result of inspecting all elements into the output stream. 
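+//
+// Callers drive an Inspector roughly the way inspectElements does in
+// inspect.go (tmpl is a parsed template; element and raw come from the
+// search callback):
+//
+//	i := inspect.NewTemplateInspector(os.Stdout, tmpl)
+//	if err := i.Inspect(element, raw); err != nil {
+//		return err
+//	}
+//	return i.Flush()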
+func (i *TemplateInspector) Flush() error {
+ if i.buffer.Len() == 0 {
+ _, err := io.WriteString(i.outputStream, "\n")
+ return err
+ }
+ _, err := io.Copy(i.outputStream, i.buffer)
+ return err
+}
+
+// IndentedInspector uses a buffer to stop the indented representation of an element.
+type IndentedInspector struct {
+ outputStream io.Writer
+ elements []interface{}
+ rawElements [][]byte
+}
+
+// NewIndentedInspector generates a new IndentedInspector.
+func NewIndentedInspector(outputStream io.Writer) Inspector {
+ return &IndentedInspector{
+ outputStream: outputStream,
+ }
+}
+
+// Inspect writes the raw element with an indented json format.
+func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error {
+ if rawElement != nil {
+ i.rawElements = append(i.rawElements, rawElement)
+ } else {
+ i.elements = append(i.elements, typedElement)
+ }
+ return nil
+}
+
+// Flush write the result of inspecting all elements into the output stream.
+func (i *IndentedInspector) Flush() error {
+ if len(i.elements) == 0 && len(i.rawElements) == 0 {
+ _, err := io.WriteString(i.outputStream, "[]\n")
+ return err
+ }
+
+ var buffer io.Reader
+ if len(i.rawElements) > 0 {
+ bytesBuffer := new(bytes.Buffer)
+ bytesBuffer.WriteString("[")
+ for idx, r := range i.rawElements {
+ bytesBuffer.Write(r)
+ if idx < len(i.rawElements)-1 {
+ bytesBuffer.WriteString(",")
+ }
+ }
+ bytesBuffer.WriteString("]")
+ indented := new(bytes.Buffer)
+ if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil {
+ return err
+ }
+ buffer = indented
+ } else {
+ b, err := json.MarshalIndent(i.elements, "", " ")
+ if err != nil {
+ return err
+ }
+ buffer = bytes.NewReader(b)
+ }
+
+ if _, err := io.Copy(i.outputStream, buffer); err != nil {
+ return err
+ }
+ _, err := io.WriteString(i.outputStream, "\n")
+ return err
+} diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go b/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go new file mode 100644 index 00000000..39a0510c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go @@ -0,0 +1,40 @@ +// +build !go1.5
+
+package inspect
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// tryeRawInspectFallback executes the inspect template with a raw interface.
+// This allows docker cli to parse inspect structs injected with Swarm fields.
+// Unfortunately, go 1.4 doesn't fail executing invalid templates when the input is an interface.
+// It doesn't allow to modify this behavior either, sending messages to the output.
+// We assume that the template is invalid when there is a <no value>, if the template was valid
+// we'd get <nil> or "" values. In that case we fail with the original error raised executing the
+// template with the typed input.
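+// (The go1.5 build in inspector_go15.go avoids this heuristic entirely
+// by executing the template with Option("missingkey=error").)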
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, originalErr error) error {
+ var raw interface{}
+ buffer := new(bytes.Buffer)
+ rdr := bytes.NewReader(rawElement)
+ dec := json.NewDecoder(rdr)
+
+ if rawErr := dec.Decode(&raw); rawErr != nil {
+ return fmt.Errorf("unable to read inspect data: %v", rawErr)
+ }
+
+ if rawErr := i.tmpl.Execute(buffer, raw); rawErr != nil {
+ return fmt.Errorf("Template parsing error: %v", rawErr)
+ }
+
+ if strings.Contains(buffer.String(), "<no value>") {
+ return fmt.Errorf("Template parsing error: %v", originalErr)
+ }
+
+ i.buffer.Write(buffer.Bytes())
+ i.buffer.WriteByte('\n')
+ return nil
+} diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go b/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go new file mode 100644 index 00000000..b098f415 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go @@ -0,0 +1,29 @@ +// +build go1.5
+
+package inspect
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+)
+
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, _ error) error {
+ var raw interface{}
+ buffer := new(bytes.Buffer)
+ rdr := bytes.NewReader(rawElement)
+ dec := json.NewDecoder(rdr)
+
+ if rawErr := dec.Decode(&raw); rawErr != nil {
+ return fmt.Errorf("unable to read inspect data: %v", rawErr)
+ }
+
+ tmplMissingKey := i.tmpl.Option("missingkey=error")
+ if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil {
+ return fmt.Errorf("Template parsing error: %v", rawErr)
+ }
+
+ i.buffer.Write(buffer.Bytes())
+ i.buffer.WriteByte('\n')
+ return nil
+} diff --git a/vendor/github.com/docker/docker/api/client/kill.go b/vendor/github.com/docker/docker/api/client/kill.go new file mode 100644 index 00000000..9841ba4d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/kill.go @@ -0,0 +1,35 @@ +package client
+
+import (
+ "fmt"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ Cli "github.com/docker/docker/cli"
+ flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdKill kills one or more running container using SIGKILL or a specified signal.
+//
+// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
+func (cli *DockerCli) CmdKill(args ...string) error {
+ cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["kill"].Description, true)
+ signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
+ cmd.Require(flag.Min, 1)
+
+ cmd.ParseFlags(args, true)
+
+ var errs []string
+ for _, name := range cmd.Args() {
+ if err := cli.client.ContainerKill(context.Background(), name, *signal); err != nil {
+ errs = append(errs, err.Error())
+ } else {
+ fmt.Fprintf(cli.out, "%s\n", name)
+ }
+ }
+ if len(errs) > 0 {
+ return fmt.Errorf("%s", strings.Join(errs, "\n"))
+ }
+ return nil
+} diff --git a/vendor/github.com/docker/docker/api/client/load.go b/vendor/github.com/docker/docker/api/client/load.go new file mode 100644 index 00000000..820fdc0e --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/load.go @@ -0,0 +1,50 @@ +package client
+
+import (
+ "io"
+ "os"
+
+ "golang.org/x/net/context"
+
+ Cli "github.com/docker/docker/cli"
+ "github.com/docker/docker/pkg/jsonmessage"
+ flag "github.com/docker/docker/pkg/mflag"
+)
+
+// CmdLoad loads an image from a tar archive.
+//
+// The tar archive is read from STDIN by default, or from a tar archive file.
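+// The input is expected to be in the format produced by docker save,
+// so, for example:
+//
+//	docker save busybox > busybox.tar
+//	docker load -i busybox.tar
+//
+// round-trips an image through a file.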
+// +// Usage: docker load [OPTIONS] +func (cli *DockerCli) CmdLoad(args ...string) error { + cmd := Cli.Subcmd("load", nil, Cli.DockerCommands["load"].Description, true) + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the load output") + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + var input io.Reader = cli.in + if *infile != "" { + file, err := os.Open(*infile) + if err != nil { + return err + } + defer file.Close() + input = file + } + if !cli.isTerminalOut { + *quiet = true + } + response, err := cli.client.ImageLoad(context.Background(), input, *quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesStream(response.Body, cli.out, cli.outFd, cli.isTerminalOut, nil) + } + + _, err = io.Copy(cli.out, response.Body) + return err +} diff --git a/vendor/github.com/docker/docker/api/client/login.go b/vendor/github.com/docker/docker/api/client/login.go new file mode 100644 index 00000000..a772348c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/login.go @@ -0,0 +1,177 @@ +package client + +import ( + "bufio" + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/credentials" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/types" +) + +// CmdLogin logs in a user to a Docker registry service. +// +// If no server is specified, the user will be logged into or registered to the registry's index server. +// +// Usage: docker login SERVER +func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) + cmd.Require(flag.Max, 1) + + flUser := cmd.String([]string{"u", "-username"}, "", "Username") + flPassword := cmd.String([]string{"p", "-password"}, "", "Password") + + // Deprecated in 1.11: Should be removed in docker 1.13 + cmd.String([]string{"#e", "#-email"}, "", "Email") + + cmd.ParseFlags(args, true) + + // On Windows, force the use of the regular OS stdin stream. 
Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.in = os.Stdin + } + + var serverAddress string + var isDefaultRegistry bool + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } else { + serverAddress = cli.electAuthServer() + isDefaultRegistry = true + } + + authConfig, err := cli.configureAuth(*flUser, *flPassword, serverAddress, isDefaultRegistry) + if err != nil { + return err + } + + response, err := cli.client.RegistryLogin(context.Background(), authConfig) + if err != nil { + return err + } + + if response.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = response.IdentityToken + } + if err := storeCredentials(cli.configFile, authConfig); err != nil { + return fmt.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(cli.out, response.Status) + } + return nil +} + +func (cli *DockerCli) promptWithDefault(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } +} + +func (cli *DockerCli) configureAuth(flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + authconfig, err := getCredentials(cli.configFile, serverAddress) + if err != nil { + return authconfig, err + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a defauly registry (docker hub), then display the following message. + fmt.Fprintln(cli.out, "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + cli.promptWithDefault("Username", authconfig.Username) + flUser = readInput(cli.in, cli.out) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + + if flUser == "" { + return authconfig, fmt.Errorf("Error: Non-null Username Required") + } + + if flPassword == "" { + oldState, err := term.SaveState(cli.inFd) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.inFd, oldState) + + flPassword = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.inFd, oldState) + if flPassword == "" { + return authconfig, fmt.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +// getCredentials loads the user credentials from a credentials store. +// The store is determined by the config file settings. +func getCredentials(c *cliconfig.ConfigFile, serverAddress string) (types.AuthConfig, error) { + s := loadCredentialsStore(c) + return s.Get(serverAddress) +} + +func getAllCredentials(c *cliconfig.ConfigFile) (map[string]types.AuthConfig, error) { + s := loadCredentialsStore(c) + return s.GetAll() +} + +// storeCredentials saves the user credentials in a credentials store. +// The store is determined by the config file settings. 
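+// For example, a config.json that delegates to a native helper (a
+// docker-credential-osxkeychain binary on the PATH, say) would contain:
+//
+//	{
+//		"credsStore": "osxkeychain"
+//	}
+//
+// With no credsStore entry, credentials are kept in the config file
+// itself (see loadCredentialsStore below).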
+func storeCredentials(c *cliconfig.ConfigFile, auth types.AuthConfig) error { + s := loadCredentialsStore(c) + return s.Store(auth) +} + +// eraseCredentials removes the user credentials from a credentials store. +// The store is determined by the config file settings. +func eraseCredentials(c *cliconfig.ConfigFile, serverAddress string) error { + s := loadCredentialsStore(c) + return s.Erase(serverAddress) +} + +// loadCredentialsStore initializes a new credentials store based +// in the settings provided in the configuration file. +func loadCredentialsStore(c *cliconfig.ConfigFile) credentials.Store { + if c.CredentialsStore != "" { + return credentials.NewNativeStore(c) + } + return credentials.NewFileStore(c) +} diff --git a/vendor/github.com/docker/docker/api/client/logout.go b/vendor/github.com/docker/docker/api/client/logout.go new file mode 100644 index 00000000..b5ff59dd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/logout.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdLogout logs a user out from a Docker registry. +// +// If no server is specified, the user will be logged out from the registry's index server. +// +// Usage: docker logout [SERVER] +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + var serverAddress string + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } else { + serverAddress = cli.electAuthServer() + } + + // check if we're logged in based on the records in the config file + // which means it couldn't have user/pass cause they may be in the creds store + if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + return nil + } + + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + if err := eraseCredentials(cli.configFile, serverAddress); err != nil { + fmt.Fprintf(cli.out, "WARNING: could not erase credentials: %v\n", err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/logs.go b/vendor/github.com/docker/docker/api/client/logs.go new file mode 100644 index 00000000..7cd5605e --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/logs.go @@ -0,0 +1,65 @@ +package client + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" +) + +var validDrivers = map[string]bool{ + "json-file": true, + "journald": true, +} + +// CmdLogs fetches the logs of a given container. 
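+// Only logging drivers the daemon can read back are supported; see the
+// validDrivers map above (json-file and journald).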
+//
+// docker logs [OPTIONS] CONTAINER
+func (cli *DockerCli) CmdLogs(args ...string) error {
+ cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, Cli.DockerCommands["logs"].Description, true)
+ follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
+ since := cmd.String([]string{"-since"}, "", "Show logs since timestamp")
+ times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
+ tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
+ cmd.Require(flag.Exact, 1)
+
+ cmd.ParseFlags(args, true)
+
+ name := cmd.Arg(0)
+
+ c, err := cli.client.ContainerInspect(context.Background(), name)
+ if err != nil {
+ return err
+ }
+
+ if !validDrivers[c.HostConfig.LogConfig.Type] {
+ return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type)
+ }
+
+ options := types.ContainerLogsOptions{
+ ContainerID: name,
+ ShowStdout: true,
+ ShowStderr: true,
+ Since: *since,
+ Timestamps: *times,
+ Follow: *follow,
+ Tail: *tail,
+ }
+ responseBody, err := cli.client.ContainerLogs(context.Background(), options)
+ if err != nil {
+ return err
+ }
+ defer responseBody.Close()
+
+ if c.Config.Tty {
+ _, err = io.Copy(cli.out, responseBody)
+ } else {
+ _, err = stdcopy.StdCopy(cli.out, cli.err, responseBody)
+ }
+ return err
+} diff --git a/vendor/github.com/docker/docker/api/client/network.go b/vendor/github.com/docker/docker/api/client/network.go new file mode 100644 index 00000000..4bbc7154 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/network.go @@ -0,0 +1,392 @@ +package client
+
+import (
+ "fmt"
+ "net"
+ "sort"
+ "strings"
+ "text/tabwriter"
+
+ "golang.org/x/net/context"
+
+ Cli "github.com/docker/docker/cli"
+ "github.com/docker/docker/opts"
+ flag "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/stringid"
+ runconfigopts "github.com/docker/docker/runconfig/opts"
+ "github.com/docker/engine-api/types"
+ "github.com/docker/engine-api/types/filters"
+ "github.com/docker/engine-api/types/network"
+)
+
+// CmdNetwork is the parent subcommand for all network commands
+//
+// Usage: docker network <COMMAND> [OPTIONS]
+func (cli *DockerCli) CmdNetwork(args ...string) error {
+ cmd := Cli.Subcmd("network", []string{"COMMAND [OPTIONS]"}, networkUsage(), false)
+ cmd.Require(flag.Min, 1)
+ err := cmd.ParseFlags(args, true)
+ cmd.Usage()
+ return err
+}
+
+// CmdNetworkCreate creates a new network with a given name
+//
+// Usage: docker network create [OPTIONS] <NETWORK-NAME>
+func (cli *DockerCli) CmdNetworkCreate(args ...string) error {
+ cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false)
+ flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network")
+ flOpts := opts.NewMapOpts(nil, nil)
+
+ flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver")
+ flIpamSubnet := opts.NewListOpts(nil)
+ flIpamIPRange := opts.NewListOpts(nil)
+ flIpamGateway := opts.NewListOpts(nil)
+ flIpamAux := opts.NewMapOpts(nil, nil)
+ flIpamOpt := opts.NewMapOpts(nil, nil)
+ flLabels := opts.NewListOpts(nil)
+
+ cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment")
+ cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range")
+ cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet")
+ cmd.Var(flIpamAux,
[]string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver") + cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options") + cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options") + cmd.Var(&flLabels, []string{"-label"}, "set metadata on a network") + + flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network") + flIPv6 := cmd.Bool([]string{"-ipv6"}, false, "enable IPv6 networking") + + cmd.Require(flag.Exact, 1) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + // Set the default driver to "" if the user didn't set the value. + // That way we can know whether it was user input or not. + driver := *flDriver + if !cmd.IsSet("-driver") && !cmd.IsSet("d") { + driver = "" + } + + ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Name: cmd.Arg(0), + Driver: driver, + IPAM: network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()}, + Options: flOpts.GetAll(), + CheckDuplicate: true, + Internal: *flInternal, + EnableIPv6: *flIPv6, + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + resp, err := cli.client.NetworkCreate(context.Background(), nc) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", resp.ID) + return nil +} + +// CmdNetworkRm deletes one or more networks +// +// Usage: docker network rm NETWORK-NAME|NETWORK-ID [NETWORK-NAME|NETWORK-ID...] +func (cli *DockerCli) CmdNetworkRm(args ...string) error { + cmd := Cli.Subcmd("network rm", []string{"NETWORK [NETWORK...]"}, "Deletes one or more networks", false) + cmd.Require(flag.Min, 1) + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + status := 0 + for _, net := range cmd.Args() { + if err := cli.client.NetworkRemove(context.Background(), net); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} + +// CmdNetworkConnect connects a container to a network +// +// Usage: docker network connect [OPTIONS] +func (cli *DockerCli) CmdNetworkConnect(args ...string) error { + cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false) + flIPAddress := cmd.String([]string{"-ip"}, "", "IP Address") + flIPv6Address := cmd.String([]string{"-ip6"}, "", "IPv6 Address") + flLinks := opts.NewListOpts(runconfigopts.ValidateLink) + cmd.Var(&flLinks, []string{"-link"}, "Add link to another container") + flAliases := opts.NewListOpts(nil) + cmd.Var(&flAliases, []string{"-alias"}, "Add network-scoped alias for the container") + cmd.Require(flag.Min, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: *flIPAddress, + IPv6Address: *flIPv6Address, + }, + Links: flLinks.GetAll(), + Aliases: flAliases.GetAll(), + } + return cli.client.NetworkConnect(context.Background(), cmd.Arg(0), cmd.Arg(1), epConfig) +} + +// CmdNetworkDisconnect disconnects a container from a network +// +// Usage: docker network disconnect +func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error { + cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, "Disconnects container from a network", false) + force := 
cmd.Bool([]string{"f", "-force"}, false, "Force the container to disconnect from a network") + cmd.Require(flag.Exact, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + return cli.client.NetworkDisconnect(context.Background(), cmd.Arg(0), cmd.Arg(1), *force) +} + +// CmdNetworkLs lists all the networks managed by docker daemon +// +// Usage: docker network ls [OPTIONS] +func (cli *DockerCli) CmdNetworkLs(args ...string) error { + cmd := Cli.Subcmd("network ls", nil, "Lists networks", true) + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + cmd.Require(flag.Exact, 0) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + // Consolidate all filter flags, and sanity check them early. + // They'll get process after get response from server. + netFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + if netFilterArgs, err = filters.ParseFlag(f, netFilterArgs); err != nil { + return err + } + } + + options := types.NetworkListOptions{ + Filters: netFilterArgs, + } + + networkResources, err := cli.client.NetworkList(context.Background(), options) + if err != nil { + return err + } + + wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + + // unless quiet (-q) is specified, print field titles + if !*quiet { + fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER") + } + sort.Sort(byNetworkName(networkResources)) + for _, networkResource := range networkResources { + ID := networkResource.ID + netName := networkResource.Name + if !*noTrunc { + ID = stringid.TruncateID(ID) + } + if *quiet { + fmt.Fprintln(wr, ID) + continue + } + driver := networkResource.Driver + fmt.Fprintf(wr, "%s\t%s\t%s\t", + ID, + netName, + driver) + fmt.Fprint(wr, "\n") + } + wr.Flush() + return nil +} + +type byNetworkName []types.NetworkResource + +func (r byNetworkName) Len() int { return len(r) } +func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } + +// CmdNetworkInspect inspects the network object for more details +// +// Usage: docker network inspect [OPTIONS] [NETWORK...] +func (cli *DockerCli) CmdNetworkInspect(args ...string) error { + cmd := Cli.Subcmd("network inspect", []string{"NETWORK [NETWORK...]"}, "Displays detailed information on one or more networks", false) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + cmd.Require(flag.Min, 1) + + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + inspectSearcher := func(name string) (interface{}, []byte, error) { + i, err := cli.client.NetworkInspect(context.Background(), name) + return i, nil, err + } + + return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) +} + +// Consolidates the ipam configuration as a group from different related configurations +// user can configure network with multiple non-overlapping subnets and hence it is +// possible to correlate the various related parameters and consolidate them. +// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into +// structured ipam data. 
+func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { + return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return false, fmt.Errorf("Invalid subnet %s : %v", subnet, err) + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, fmt.Errorf("Invalid cidr %s : %v", data, err) + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} + +func networkUsage() string { + networkCommands := map[string]string{ + "create": "Create a network", + "connect": "Connect container to a network", + "disconnect": "Disconnect container from a network", + "inspect": "Display detailed network information", + "ls": "List all networks", + "rm": "Remove a network", + } + + help := "Commands:\n" + + for cmd, description := range networkCommands { + help += fmt.Sprintf(" %-25.25s%s\n", cmd, description) + } + + help += fmt.Sprintf("\nRun 'docker network COMMAND --help' for more information on a command.") + return help +} diff --git a/vendor/github.com/docker/docker/api/client/pause.go b/vendor/github.com/docker/docker/api/client/pause.go new file mode 100644 index 00000000..ffba1c9a --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/pause.go @@ -0,0 +1,34 @@ +package client + +import 
( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdPause pauses all processes within one or more containers. +// +// Usage: docker pause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["pause"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerPause(context.Background(), name); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/port.go b/vendor/github.com/docker/docker/api/client/port.go new file mode 100644 index 00000000..9b545f56 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/port.go @@ -0,0 +1,61 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/nat" +) + +// CmdPort lists port mappings for a container. +// If a private port is specified, it also shows the public-facing port that is NATed to the private port. +// +// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, Cli.DockerCommands["port"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + c, err := cli.client.ContainerInspect(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + if cmd.NArg() == 2 { + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/ps.go b/vendor/github.com/docker/docker/api/client/ps.go new file mode 100644 index 00000000..3627ffc1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/ps.go @@ -0,0 +1,89 @@ +package client + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/formatter" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdPs outputs a list of Docker containers. 
+// +// Usage: docker ps [OPTIONS] +func (cli *DockerCli) CmdPs(args ...string) error { + var ( + err error + + psFilterArgs = filters.NewArgs() + + cmd = Cli.Subcmd("ps", nil, Cli.DockerCommands["ps"].Description, true) + quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") + all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noTrunc = cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container (includes all states)") + since = cmd.String([]string{"#-since"}, "", "Show containers created since Id or Name (includes all states)") + before = cmd.String([]string{"#-before"}, "", "Only show containers created before Id or Name") + last = cmd.Int([]string{"n"}, -1, "Show n last created containers (includes all states)") + format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template") + flFilter = opts.NewListOpts(nil) + ) + cmd.Require(flag.Exact, 0) + + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + cmd.ParseFlags(args, true) + if *last == -1 && *nLatest { + *last = 1 + } + + // Consolidate all filter flags, and sanity check them. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { + return err + } + } + + options := types.ContainerListOptions{ + All: *all, + Limit: *last, + Since: *since, + Before: *before, + Size: *size, + Filter: psFilterArgs, + } + + containers, err := cli.client.ContainerList(context.Background(), options) + if err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.PsFormat()) > 0 && !*quiet { + f = cli.PsFormat() + } else { + f = "table" + } + } + + psCtx := formatter.ContainerContext{ + Context: formatter.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Trunc: !*noTrunc, + }, + Size: *size, + Containers: containers, + } + + psCtx.Write() + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/pull.go b/vendor/github.com/docker/docker/api/client/pull.go new file mode 100644 index 00000000..eb79d38b --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/pull.go @@ -0,0 +1,73 @@ +package client + +import ( + "errors" + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdPull pulls an image or a repository from the registry. 
+// +// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, Cli.DockerCommands["pull"].Description, true) + allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + remote := cmd.Arg(0) + + distributionRef, err := reference.ParseNamed(remote) + if err != nil { + return err + } + if *allTags && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !*allTags && reference.IsNameOnly(distributionRef) { + distributionRef = reference.WithDefaultTag(distributionRef) + fmt.Fprintf(cli.out, "Using default tag: %s\n", reference.DefaultTag) + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(repoInfo.Index) + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") + + return cli.imagePullPrivileged(authConfig, distributionRef.String(), "", requestPrivilege) +} + +func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) error { + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + ImageID: imageID, + Tag: tag, + RegistryAuth: encodedAuth, + } + + responseBody, err := cli.client.ImagePull(context.Background(), options, requestPrivilege) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} diff --git a/vendor/github.com/docker/docker/api/client/push.go b/vendor/github.com/docker/docker/api/client/push.go new file mode 100644 index 00000000..5f51ec27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/push.go @@ -0,0 +1,72 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdPush pushes an image or repository to the registry. 
+// +// Usage: docker push NAME[:TAG] +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, Cli.DockerCommands["push"].Description, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + ref, err := reference.ParseNamed(cmd.Arg(0)) + if err != nil { + return err + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := cli.resolveAuthConfig(repoInfo.Index) + + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push") + + responseBody, err := cli.imagePushPrivileged(authConfig, ref.Name(), tag, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} + +func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + ImageID: imageID, + Tag: tag, + RegistryAuth: encodedAuth, + } + + return cli.client.ImagePush(context.Background(), options, requestPrivilege) +} diff --git a/vendor/github.com/docker/docker/api/client/rename.go b/vendor/github.com/docker/docker/api/client/rename.go new file mode 100644 index 00000000..68369881 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rename.go @@ -0,0 +1,34 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRename renames a container. +// +// Usage: docker rename OLD_NAME NEW_NAME +func (cli *DockerCli) CmdRename(args ...string) error { + cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, Cli.DockerCommands["rename"].Description, true) + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + oldName := strings.TrimSpace(cmd.Arg(0)) + newName := strings.TrimSpace(cmd.Arg(1)) + + if oldName == "" || newName == "" { + return fmt.Errorf("Error: Neither old nor new names may be empty") + } + + if err := cli.client.ContainerRename(context.Background(), oldName, newName); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/restart.go b/vendor/github.com/docker/docker/api/client/restart.go new file mode 100644 index 00000000..c0a04bd1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/restart.go @@ -0,0 +1,35 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRestart restarts one or more containers. +// +// Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] 
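The function below is a thin loop over the engine-api ContainerRestart call. As a standalone sketch of that call (the NewEnvClient constructor and the container name are assumptions, not taken from this diff):

	package main

	import (
		"log"

		"golang.org/x/net/context"

		"github.com/docker/engine-api/client"
	)

	func main() {
		cli, err := client.NewEnvClient() // assumed: builds a client from DOCKER_HOST et al.
		if err != nil {
			log.Fatal(err)
		}
		// Restart the container, allowing 10 seconds for a clean stop
		// before it is killed (mirrors the -t/--time default below).
		if err := cli.ContainerRestart(context.Background(), "mycontainer", 10); err != nil {
			log.Fatal(err)
		}
	}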
+func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["restart"].Description, true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerRestart(context.Background(), name, *nSeconds); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/rm.go b/vendor/github.com/docker/docker/api/client/rm.go new file mode 100644 index 00000000..c252b1f7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rm.go @@ -0,0 +1,56 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" +) + +// CmdRm removes one or more containers. +// +// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdRm(args ...string) error { + cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["rm"].Description, true) + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") + link := cmd.Bool([]string{"l", "-link"}, false, "Remove the specified link") + force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if name == "" { + return fmt.Errorf("Container name cannot be empty") + } + name = strings.Trim(name, "/") + + if err := cli.removeContainer(name, *v, *link, *force); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} + +func (cli *DockerCli) removeContainer(containerID string, removeVolumes, removeLinks, force bool) error { + options := types.ContainerRemoveOptions{ + ContainerID: containerID, + RemoveVolumes: removeVolumes, + RemoveLinks: removeLinks, + Force: force, + } + if err := cli.client.ContainerRemove(context.Background(), options); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/rmi.go b/vendor/github.com/docker/docker/api/client/rmi.go new file mode 100644 index 00000000..ac1b41db --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rmi.go @@ -0,0 +1,59 @@ +package client + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" +) + +// CmdRmi removes all images with the specified name(s). +// +// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] 
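The rmi loop below builds one ImageRemoveOptions per argument. Sketched in isolation (assuming an already-constructed engine-api client in cli and standard imports; the image name is illustrative):

	// Equivalent of `docker rmi myimage:latest` without --no-prune.
	dels, err := cli.ImageRemove(context.Background(), types.ImageRemoveOptions{
		ImageID:       "myimage:latest",
		Force:         false,
		PruneChildren: true, // the default: delete untagged parents
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range dels {
		// Each entry reports either a deleted image/layer ID or an untagged reference.
		fmt.Println(d.Deleted, d.Untagged)
	}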
+func (cli *DockerCli) CmdRmi(args ...string) error { + cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["rmi"].Description, true) + force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") + noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var errs []string + for _, name := range cmd.Args() { + options := types.ImageRemoveOptions{ + ImageID: name, + Force: *force, + PruneChildren: !*noprune, + } + + dels, err := cli.client.ImageRemove(context.Background(), options) + if err != nil { + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged) + } + } + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/run.go b/vendor/github.com/docker/docker/api/client/run.go new file mode 100644 index 00000000..be0c62cb --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/run.go @@ -0,0 +1,274 @@ +package client + +import ( + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" +) + +const ( + errCmdNotFound = "not found or does not exist." + errCmdCouldNotBeInvoked = "could not be invoked." +) + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +// if container start fails with 'command not found' error, return 127 +// if container start fails with 'command cannot be invoked' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.Trim(err.Error(), "Error response from daemon: ") + statusError := Cli.StatusError{StatusCode: 125} + + if strings.HasPrefix(trimmedErr, "Container command") { + if strings.Contains(trimmedErr, errCmdNotFound) { + statusError = Cli.StatusError{StatusCode: 127} + } else if strings.Contains(trimmedErr, errCmdCouldNotBeInvoked) { + statusError = Cli.StatusError{StatusCode: 126} + } + } + + return statusError +} + +// CmdRun runs a command in a new container. +// +// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] 
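CmdRun reports start failures through runStartContainerErr above, which folds daemon errors into shell-style exit codes. A sketch of the mapping (the error text is illustrative):

	// "not found or does not exist." in a "Container command" error -> 127,
	// "could not be invoked." -> 126, anything else from the daemon -> 125.
	err := errors.New("Error response from daemon: Container command not found or does not exist.")
	statusErr := runStartContainerErr(err) // Cli.StatusError{StatusCode: 127}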
+func (cli *DockerCli) CmdRun(args ...string) error { + cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true) + + // These are flags not stored in Config/HostConfig + var ( + flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") + flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + flDetachKeys = cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + flAttach *opts.ListOpts + + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") + ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") + ) + + config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) + + // just in case the Parse does not exit + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(125) + } + + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintf(cli.err, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") + } + + if config.Image == "" { + cmd.Usage() + return nil + } + + config.ArgsEscaped = false + + if !*flDetach { + if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := cmd.Lookup("-attach"); fl != nil { + flAttach = fl.Value.(*opts.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + if *flAutoRemove { + return ErrConflictDetachAutoRemove + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable flSigProxy when in TTY mode + sigProxy := *flSigProxy + if config.Tty { + sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. 
+ if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() + } + + createResponse, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + cmd.ReportError(err.Error(), true) + return runStartContainerErr(err) + } + if sigProxy { + sigc := cli.forwardAllSignals(createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(cli.out, "%s\n", createResponse.ID) + }() + } + if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { + return ErrConflictRestartPolicyAndAutoRemove + } + + if config.AttachStdin || config.AttachStdout || config.AttachStderr { + var ( + out, stderr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = cli.in + } + if config.AttachStdout { + out = cli.out + } + if config.AttachStderr { + if config.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + if *flDetachKeys != "" { + cli.configFile.DetachKeys = *flDetachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: createResponse.ID, + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: cli.configFile.DetachKeys, + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + if in != nil && config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + errCh = promise.Go(func() error { + return cli.holdHijackedConnection(config.Tty, in, out, stderr, resp) + }) + } + + if *flAutoRemove { + defer func() { + if err := cli.removeContainer(createResponse.ID, true, false, false); err != nil { + fmt.Fprintf(cli.err, "%v\n", err) + } + }() + } + + //start the container + if err := cli.client.ContainerStart(context.Background(), createResponse.ID); err != nil { + cmd.ReportError(err.Error(), false) + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(createResponse.ID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. 
+ if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + var status int + + // Attached mode + if *flAutoRemove { + // Autoremove: wait for the container to finish, retrieve + // the exit code and remove the container + if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil { + return runStartContainerErr(err) + } + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } else { + // No Autoremove: Simply retrieve the exit code + if !config.Tty { + // In non-TTY mode, we can't detach, so we must wait for container exit + if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil { + return err + } + } else { + // In TTY mode, there is a race: if the process dies too slowly, the state could + // be updated after the getExitCode call and result in the wrong exit code being reported + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/save.go b/vendor/github.com/docker/docker/api/client/save.go new file mode 100644 index 00000000..4aabf1bd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/save.go @@ -0,0 +1,42 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdSave saves one or more images to a tar archive. +// +// The tar archive is written to STDOUT by default, or written to a file. +// +// Usage: docker save [OPTIONS] IMAGE [IMAGE...] +func (cli *DockerCli) CmdSave(args ...string) error { + cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["save"].Description+" (streamed to STDOUT by default)", true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *outfile == "" && cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + responseBody, err := cli.client.ImageSave(context.Background(), cmd.Args()) + if err != nil { + return err + } + defer responseBody.Close() + + if *outfile == "" { + _, err := io.Copy(cli.out, responseBody) + return err + } + + return copyToFile(*outfile, responseBody) + +} diff --git a/vendor/github.com/docker/docker/api/client/search.go b/vendor/github.com/docker/docker/api/client/search.go new file mode 100644 index 00000000..82deb409 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/search.go @@ -0,0 +1,93 @@ +package client + +import ( + "fmt" + "net/url" + "sort" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// CmdSearch searches the Docker Hub for images. 
+// +// Usage: docker search [OPTIONS] TERM +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true) + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") + stars := cmd.Uint([]string{"s", "-stars"}, 0, "Only displays with at least x stars") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + v := url.Values{} + v.Set("term", name) + + indexInfo, err := registry.ParseSearchIndexInfo(name) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(indexInfo) + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search") + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageSearchOptions{ + Term: name, + RegistryAuth: encodedAuth, + } + + unorderedResults, err := cli.client.ImageSearch(context.Background(), options, requestPrivilege) + if err != nil { + return err + } + + results := searchResultsByStars(unorderedResults) + sort.Sort(results) + + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = stringutils.Truncate(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated || res.IsTrusted { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// SearchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/vendor/github.com/docker/docker/api/client/start.go b/vendor/github.com/docker/docker/api/client/start.go new file mode 100644 index 00000000..1ff2845f --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/start.go @@ -0,0 +1,157 @@ +package client + +import ( + "fmt" + "io" + "os" + "strings" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/engine-api/types" +) + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD || s == signal.SIGPIPE { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) + continue + } + + if err := cli.client.ContainerKill(context.Background(), cid, sig); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +// CmdStart starts one or more containers. 
+// +// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdStart(args ...string) error { + cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["start"].Description, true) + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *attach || *openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + // 2. Attach to the container. + containerID := cmd.Arg(0) + c, err := cli.client.ContainerInspect(context.Background(), containerID) + if err != nil { + return err + } + + if !c.Config.Tty { + sigc := cli.forwardAllSignals(containerID) + defer signal.StopCatch(sigc) + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: containerID, + Stream: true, + Stdin: *openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: cli.configFile.DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = cli.in + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + defer resp.Close() + if in != nil && c.Config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + + cErr := promise.Go(func() error { + return cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp) + }) + + // 3. Start the container. + if err := cli.client.ContainerStart(context.Background(), containerID); err != nil { + return err + } + + // 4. Wait for attachment to break. + if c.Config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(containerID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + _, status, err := getExitCode(cli, containerID) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + } else { + // We're not going to attach to anything. + // Start as many containers as we want. 
+ return cli.startContainersWithoutAttachments(cmd.Args()) + } + + return nil +} + +func (cli *DockerCli) startContainersWithoutAttachments(containerIDs []string) error { + var failedContainers []string + for _, containerID := range containerIDs { + if err := cli.client.ContainerStart(context.Background(), containerID); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + failedContainers = append(failedContainers, containerID) + } else { + fmt.Fprintf(cli.out, "%s\n", containerID) + } + } + + if len(failedContainers) > 0 { + return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/stats.go b/vendor/github.com/docker/docker/api/client/stats.go new file mode 100644 index 00000000..b84ac3e0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stats.go @@ -0,0 +1,208 @@ +package client + +import ( + "fmt" + "io" + "strings" + "sync" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// CmdStats displays a live stream of resource usage statistics for one or more containers. +// +// This shows real-time information on CPU usage, memory usage, and network I/O. +// +// Usage: docker stats [OPTIONS] [CONTAINER...] +func (cli *DockerCli) CmdStats(args ...string) error { + cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true) + all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") + + cmd.ParseFlags(args, true) + + names := cmd.Args() + showAll := len(names) == 0 + closeChan := make(chan error) + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + resBody, err := cli.client.Events(context.Background(), options) + // Whether we successfully subscribed to events or not, we can now + // unblock the main goroutine. + close(started) + if err != nil { + closeChan <- err + return + } + defer resBody.Close() + + decodeEvents(resBody, func(event events.Message, err error) error { + if err != nil { + closeChan <- err + return nil + } + c <- event + return nil + }) + } + + // waitFirst is a WaitGroup to wait first stat data's reach for each container + waitFirst := &sync.WaitGroup{} + + cStats := stats{} + // getContainerList simulates creation event for all previously existing + // containers (only used when calling `docker stats` without arguments). + getContainerList := func() { + options := types.ContainerListOptions{ + All: *all, + } + cs, err := cli.client.ContainerList(context.Background(), options) + if err != nil { + closeChan <- err + } + for _, container := range cs { + s := &containerStats{Name: container.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + } + + if showAll { + // If no names were specified, start a long running goroutine which + // monitors container events. 
We make sure we're subscribed before + // retrieving the list of running containers to avoid a race where we + // would "miss" a creation. + started := make(chan struct{}) + eh := eventHandler{handlers: make(map[string]func(events.Message))} + eh.Handle("create", func(e events.Message) { + if *all { + s := &containerStats{Name: e.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + }) + + eh.Handle("start", func(e events.Message) { + s := &containerStats{Name: e.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + }) + + eh.Handle("die", func(e events.Message) { + if !*all { + cStats.remove(e.ID[:12]) + } + }) + + eventChan := make(chan events.Message) + go eh.Watch(eventChan) + go monitorContainerEvents(started, eventChan) + defer close(eventChan) + <-started + + // Start a short-lived goroutine to retrieve the initial list of + // containers. + getContainerList() + } else { + // Artificially send creation events for the containers we were asked to + // monitor (same code path than we use when monitoring all containers). + for _, name := range names { + s := &containerStats{Name: name} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + + // We don't expect any asynchronous errors: closeChan can be closed. + close(closeChan) + + // Do a quick pause to detect any error with the provided list of + // container names. + time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + c.mu.Lock() + if c.err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) + } + c.mu.Unlock() + } + cStats.mu.Unlock() + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + } + + // before print to screen, make sure each container get at least one valid stat data + waitFirst.Wait() + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + printHeader := func() { + if !*noStream { + fmt.Fprint(cli.out, "\033[2J") + fmt.Fprint(cli.out, "\033[H") + } + io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n") + } + + for range time.Tick(500 * time.Millisecond) { + printHeader() + toRemove := []int{} + cStats.mu.Lock() + for i, s := range cStats.cs { + if err := s.Display(w); err != nil && !*noStream { + toRemove = append(toRemove, i) + } + } + for j := len(toRemove) - 1; j >= 0; j-- { + i := toRemove[j] + cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...) 
+ } + if len(cStats.cs) == 0 && !showAll { + cStats.mu.Unlock() + return nil + } + cStats.mu.Unlock() + w.Flush() + if *noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shuts down cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/stats_helpers.go b/vendor/github.com/docker/docker/api/client/stats_helpers.go new file mode 100644 index 00000000..404c3ff1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stats_helpers.go @@ -0,0 +1,219 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +type containerStats struct { + Name string + CPUPercentage float64 + Memory float64 + MemoryLimit float64 + MemoryPercentage float64 + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + PidsCurrent uint64 + mu sync.RWMutex + err error +} + +type stats struct { + mu sync.Mutex + cs []*containerStats +} + +func (s *stats) add(cs *containerStats) bool { + s.mu.Lock() + defer s.mu.Unlock() + if _, exists := s.isKnownContainer(cs.Name); !exists { + s.cs = append(s.cs, cs) + return true + } + return false +} + +func (s *stats) remove(id string) { + s.mu.Lock() + if i, exists := s.isKnownContainer(id); exists { + s.cs = append(s.cs[:i], s.cs[i+1:]...) + } + s.mu.Unlock() +} + +func (s *stats) isKnownContainer(cid string) (int, bool) { + for i, c := range s.cs { + if c.Name == cid { + return i, true + } + } + return -1, false +} + +func (s *containerStats) Collect(cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { + var ( + getFirst bool + previousCPU uint64 + previousSystem uint64 + u = make(chan error, 1) + ) + + defer func() { + // if an error happens and we got no stats, release the wait group regardless + if !getFirst { + getFirst = true + waitFirst.Done() + } + }() + + responseBody, err := cli.ContainerStats(context.Background(), s.Name, streamStats) + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + defer responseBody.Close() + + dec := json.NewDecoder(responseBody) + go func() { + for { + var v *types.StatsJSON + if err := dec.Decode(&v); err != nil { + u <- err + return + } + + var memPercent = 0.0 + var cpuPercent = 0.0 + + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if v.MemoryStats.Limit != 0 { + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + } + + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) + blkRead, blkWrite := calculateBlockIO(v.BlkioStats) + s.mu.Lock() + s.CPUPercentage = cpuPercent + s.Memory = float64(v.MemoryStats.Usage) + s.MemoryLimit = float64(v.MemoryStats.Limit) + s.MemoryPercentage = memPercent + s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) + s.BlockRead = float64(blkRead) + s.BlockWrite = float64(blkWrite) + s.PidsCurrent = v.PidsStats.Current + s.mu.Unlock() + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the 
specified duration. + s.mu.Lock() + s.CPUPercentage = 0 + s.Memory = 0 + s.MemoryPercentage = 0 + s.MemoryLimit = 0 + s.NetworkRx = 0 + s.NetworkTx = 0 + s.BlockRead = 0 + s.BlockWrite = 0 + s.PidsCurrent = 0 + s.mu.Unlock() + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + case err := <-u: + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + } + if !streamStats { + return + } + } +} + +func (s *containerStats) Display(w io.Writer) error { + s.mu.RLock() + defer s.mu.RUnlock() + if s.err != nil { + return s.err + } + fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\t%d\n", + s.Name, + s.CPUPercentage, + units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), + s.MemoryPercentage, + units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), + units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite), + s.PidsCurrent) + return nil +} + +func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} + +func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { + for _, bioEntry := range blkio.IoServiceBytesRecursive { + switch strings.ToLower(bioEntry.Op) { + case "read": + blkRead = blkRead + bioEntry.Value + case "write": + blkWrite = blkWrite + bioEntry.Value + } + } + return +} + +func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} diff --git a/vendor/github.com/docker/docker/api/client/stop.go b/vendor/github.com/docker/docker/api/client/stop.go new file mode 100644 index 00000000..23d53447 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stop.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdStop stops one or more containers. +// +// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). +// +// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
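A standalone sketch of the engine-api call the function below drives (client construction assumed as in the earlier sketches; the name and timeout are illustrative):

	// Equivalent of `docker stop -t 10 mycontainer`: the daemon sends SIGTERM,
	// then SIGKILL once the 10-second grace period expires.
	if err := cli.ContainerStop(context.Background(), "mycontainer", 10); err != nil {
		log.Fatal(err)
	}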
+func (cli *DockerCli) CmdStop(args ...string) error { + cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["stop"].Description+".\nSending SIGTERM and then SIGKILL after a grace period", true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerStop(context.Background(), name, *nSeconds); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/tag.go b/vendor/github.com/docker/docker/api/client/tag.go new file mode 100644 index 00000000..1d87e437 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/tag.go @@ -0,0 +1,46 @@ +package client + +import ( + "errors" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// CmdTag tags an image into a repository. +// +// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true) + force := cmd.Bool([]string{"#f", "#-force"}, false, "Force the tagging even if there's a conflict") + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + ref, err := reference.ParseNamed(cmd.Arg(1)) + if err != nil { + return err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + var tag string + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + + options := types.ImageTagOptions{ + ImageID: cmd.Arg(0), + RepositoryName: ref.Name(), + Tag: tag, + Force: *force, + } + + return cli.client.ImageTag(context.Background(), options) +} diff --git a/vendor/github.com/docker/docker/api/client/top.go b/vendor/github.com/docker/docker/api/client/top.go new file mode 100644 index 00000000..bb2ec46c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/top.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdTop displays the running processes of a container. 
+// +// Usage: docker top CONTAINER +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, Cli.DockerCommands["top"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var arguments []string + if cmd.NArg() > 1 { + arguments = cmd.Args()[1:] + } + + procList, err := cli.client.ContainerTop(context.Background(), cmd.Arg(0), arguments) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/unpause.go b/vendor/github.com/docker/docker/api/client/unpause.go new file mode 100644 index 00000000..b8630b1f --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/unpause.go @@ -0,0 +1,34 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdUnpause unpauses all processes within a container, for one or more containers. +// +// Usage: docker unpause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["unpause"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerUnpause(context.Background(), name); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/update.go b/vendor/github.com/docker/docker/api/client/update.go new file mode 100644 index 00000000..a2f9e534 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/update.go @@ -0,0 +1,117 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" +) + +// CmdUpdate updates resources of one or more containers. +// +// Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] 
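The function below translates flag values into a container.UpdateConfig. Sketched directly (client construction assumed; the size string and share count are illustrative):

	// units.RAMInBytes parses human-readable sizes: "512m" -> 536870912.
	mem, err := units.RAMInBytes("512m")
	if err != nil {
		log.Fatal(err)
	}
	update := container.UpdateConfig{
		Resources: container.Resources{
			Memory:    mem,
			CPUShares: 512,
		},
	}
	// Apply new limits to a running container without recreating it.
	if err := cli.ContainerUpdate(context.Background(), "mycontainer", update); err != nil {
		log.Fatal(err)
	}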
+func (cli *DockerCli) CmdUpdate(args ...string) error { + cmd := Cli.Subcmd("update", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["update"].Description, true) + flBlkioWeight := cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") + flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flCpusetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") + flRestartPolicy := cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + if cmd.NFlag() == 0 { + return fmt.Errorf("You must provide one or more flags when using this command.") + } + + var err error + var flMemory int64 + if *flMemoryString != "" { + flMemory, err = units.RAMInBytes(*flMemoryString) + if err != nil { + return err + } + } + + var memoryReservation int64 + if *flMemoryReservation != "" { + memoryReservation, err = units.RAMInBytes(*flMemoryReservation) + if err != nil { + return err + } + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(*flMemorySwap) + if err != nil { + return err + } + } + } + + var kernelMemory int64 + if *flKernelMemory != "" { + kernelMemory, err = units.RAMInBytes(*flKernelMemory) + if err != nil { + return err + } + } + + var restartPolicy container.RestartPolicy + if *flRestartPolicy != "" { + restartPolicy, err = opts.ParseRestartPolicy(*flRestartPolicy) + if err != nil { + return err + } + } + + resources := container.Resources{ + BlkioWeight: *flBlkioWeight, + CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CPUShares: *flCPUShares, + Memory: flMemory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + KernelMemory: kernelMemory, + CPUPeriod: *flCPUPeriod, + CPUQuota: *flCPUQuota, + } + + updateConfig := container.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + names := cmd.Args() + var errs []string + for _, name := range names { + if err := cli.client.ContainerUpdate(context.Background(), name, updateConfig); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/utils.go b/vendor/github.com/docker/docker/api/client/utils.go new file mode 100644 index 00000000..4deee224 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/utils.go @@ -0,0 +1,202 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "time" + + 
"golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +func (cli *DockerCli) electAuthServer() string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.client.Info(context.Background()); err != nil { + fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// encodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) client.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + authConfig, err := cli.configureAuth("", "", indexServer, false) + if err != nil { + return "", err + } + return encodeAuthToBase64(authConfig) + } +} + +func (cli *DockerCli) resizeTty(id string, isExec bool) { + height, width := cli.getTtySize() + cli.resizeTtyTo(id, height, width, isExec) +} + +func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) { + if height == 0 && width == 0 { + return + } + + options := types.ResizeOptions{ + ID: id, + Height: height, + Width: width, + } + + var err error + if isExec { + err = cli.client.ContainerExecResize(context.Background(), options) + } else { + err = cli.client.ContainerResize(context.Background(), options) + } + + if err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +// getExitCode perform an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { + c, err := cli.client.ContainerInspect(context.Background(), containerID) + if err != nil { + // If we can't connect, then the daemon probably died. + if err != client.ErrConnectionFailed { + return false, -1, err + } + return false, -1, nil + } + + return c.State.Running, c.State.ExitCode, nil +} + +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { + resp, err := cli.client.ContainerExecInspect(context.Background(), execID) + if err != nil { + // If we can't connect, then the daemon probably died. 
+ if err != client.ErrConnectionFailed { + return false, -1, err + } + return false, -1, nil + } + + return resp.Running, resp.ExitCode, nil +} + +func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { + cli.resizeTty(id, isExec) + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.getTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.getTtySize() + + if prevW != w || prevH != h { + cli.resizeTty(id, isExec) + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + cli.resizeTty(id, isExec) + } + }() + } + return nil +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminalOut { + return 0, 0 + } + ws, err := term.GetWinsize(cli.outFd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} + +func copyToFile(outfile string, r io.Reader) error { + tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// resolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func (cli *DockerCli) resolveAuthConfig(index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = cli.electAuthServer() + } + + a, _ := getCredentials(cli.configFile, configKey) + return a +} + +func (cli *DockerCli) retrieveAuthConfigs() map[string]types.AuthConfig { + acs, _ := getAllCredentials(cli.configFile) + return acs +} diff --git a/vendor/github.com/docker/docker/api/client/version.go b/vendor/github.com/docker/docker/api/client/version.go new file mode 100644 index 00000000..ebec4def --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/version.go @@ -0,0 +1,95 @@ +package client + +import ( + "runtime" + "text/template" + "time" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/types" +) + +var versionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.APIVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} + Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.APIVersion}} + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} + Experimental: {{.Server.Experimental}}{{end}}{{end}}` + +// CmdVersion shows Docker version information. +// +// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch. 
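Editor's sketch: the version template that follows guards the whole Server block with `{{if .ServerOK}}`, so an unreachable daemon still yields client output. A trimmed, self-contained sketch of that guard; the `version`/`response` structs are hypothetical stand-ins for engine-api's types.Version and types.VersionResponse:

```go
package main

import (
	"os"
	"text/template"
)

type version struct{ Version, APIVersion, GoVersion string }

type response struct {
	Client *version
	Server *version
}

// ServerOK plays the role the template's {{if .ServerOK}} guard relies on:
// server details render only when the daemon actually answered.
func (r response) ServerOK() bool { return r.Server != nil }

const tmpl = `Client: {{.Client.Version}} (API {{.Client.APIVersion}}){{if .ServerOK}}
Server: {{.Server.Version}} (API {{.Server.APIVersion}}){{end}}
`

func main() {
	t := template.Must(template.New("version").Parse(tmpl))
	// Server left nil: only the Client line is printed.
	_ = t.Execute(os.Stdout, response{Client: &version{"1.11.1", "1.23", "go1.6"}})
}
```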
+// +// Usage: docker version +func (cli *DockerCli) CmdVersion(args ...string) (err error) { + cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true) + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + templateFormat := versionTemplate + if *tmplStr != "" { + templateFormat = *tmplStr + } + + var tmpl *template.Template + if tmpl, err = templates.Parse(templateFormat); err != nil { + return Cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + vd := types.VersionResponse{ + Client: &types.Version{ + Version: dockerversion.Version, + APIVersion: cli.client.ClientVersion(), + GoVersion: runtime.Version(), + GitCommit: dockerversion.GitCommit, + BuildTime: dockerversion.BuildTime, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + Experimental: utils.ExperimentalBuild(), + }, + } + + serverVersion, err := cli.client.ServerVersion(context.Background()) + if err == nil { + vd.Server = &serverVersion + } + + // first we need to make BuildTime more human friendly + t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) + if errTime == nil { + vd.Client.BuildTime = t.Format(time.ANSIC) + } + + if vd.ServerOK() { + t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) + if errTime == nil { + vd.Server.BuildTime = t.Format(time.ANSIC) + } + } + + if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil { + err = err2 + } + cli.out.Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/docker/docker/api/client/volume.go b/vendor/github.com/docker/docker/api/client/volume.go new file mode 100644 index 00000000..37e623fb --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/volume.go @@ -0,0 +1,177 @@ +package client + +import ( + "fmt" + "sort" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdVolume is the parent subcommand for all volume commands +// +// Usage: docker volume +func (cli *DockerCli) CmdVolume(args ...string) error { + description := Cli.DockerCommands["volume"].Description + "\n\nCommands:\n" + commands := [][]string{ + {"create", "Create a volume"}, + {"inspect", "Return low-level information on a volume"}, + {"ls", "List volumes"}, + {"rm", "Remove a volume"}, + } + + for _, cmd := range commands { + description += fmt.Sprintf(" %-25.25s%s\n", cmd[0], cmd[1]) + } + + description += "\nRun 'docker volume COMMAND --help' for more information on a command" + cmd := Cli.Subcmd("volume", []string{"[COMMAND]"}, description, false) + + cmd.Require(flag.Exact, 0) + err := cmd.ParseFlags(args, true) + cmd.Usage() + return err +} + +// CmdVolumeLs outputs a list of Docker volumes. +// +// Usage: docker volume ls [OPTIONS] +func (cli *DockerCli) CmdVolumeLs(args ...string) error { + cmd := Cli.Subcmd("volume ls", nil, "List volumes", true) + + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 
'dangling=true')") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + var err error + volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) + if err != nil { + return err + } + } + + volumes, err := cli.client.VolumeList(context.Background(), volFilterArgs) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + for _, warn := range volumes.Warnings { + fmt.Fprintln(cli.err, warn) + } + fmt.Fprintf(w, "DRIVER \tVOLUME NAME") + fmt.Fprintf(w, "\n") + } + + sort.Sort(byVolumeName(volumes.Volumes)) + for _, vol := range volumes.Volumes { + if *quiet { + fmt.Fprintln(w, vol.Name) + continue + } + fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) + } + w.Flush() + return nil +} + +type byVolumeName []*types.Volume + +func (r byVolumeName) Len() int { return len(r) } +func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byVolumeName) Less(i, j int) bool { + return r[i].Name < r[j].Name +} + +// CmdVolumeInspect displays low-level information on one or more volumes. +// +// Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] +func (cli *DockerCli) CmdVolumeInspect(args ...string) error { + cmd := Cli.Subcmd("volume inspect", []string{"VOLUME [VOLUME...]"}, "Return low-level information on a volume", true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + if err := cmd.Parse(args); err != nil { + return nil + } + + inspectSearcher := func(name string) (interface{}, []byte, error) { + i, err := cli.client.VolumeInspect(context.Background(), name) + return i, nil, err + } + + return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) +} + +// CmdVolumeCreate creates a new volume. +// +// Usage: docker volume create [OPTIONS] +func (cli *DockerCli) CmdVolumeCreate(args ...string) error { + cmd := Cli.Subcmd("volume create", nil, "Create a volume", true) + flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name") + flName := cmd.String([]string{"-name"}, "", "Specify volume name") + + flDriverOpts := opts.NewMapOpts(nil, nil) + cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options") + + flLabels := opts.NewListOpts(nil) + cmd.Var(&flLabels, []string{"-label"}, "Set metadata for a volume") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volReq := types.VolumeCreateRequest{ + Driver: *flDriver, + DriverOpts: flDriverOpts.GetAll(), + Name: *flName, + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + vol, err := cli.client.VolumeCreate(context.Background(), volReq) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", vol.Name) + return nil +} + +// CmdVolumeRm removes one or more volumes. +// +// Usage: docker volume rm VOLUME [VOLUME...] 
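Editor's sketch: byVolumeName above is the stock sort.Interface idiom — three methods plus a sort.Sort call. The same shape in isolation, with a hypothetical `volume` struct instead of engine-api's types.Volume:

```go
package main

import (
	"fmt"
	"sort"
)

type volume struct{ Name string }

// byName implements sort.Interface over volumes, ordered by name,
// exactly like byVolumeName in the vendored file.
type byName []volume

func (v byName) Len() int           { return len(v) }
func (v byName) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
func (v byName) Less(i, j int) bool { return v[i].Name < v[j].Name }

func main() {
	vols := []volume{{"data"}, {"cache"}, {"app"}}
	sort.Sort(byName(vols))
	fmt.Println(vols) // [{app} {cache} {data}]
}
```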
+func (cli *DockerCli) CmdVolumeRm(args ...string) error { + cmd := Cli.Subcmd("volume rm", []string{"VOLUME [VOLUME...]"}, "Remove a volume", true) + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + var status = 0 + + for _, name := range cmd.Args() { + if err := cli.client.VolumeRemove(context.Background(), name); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(cli.out, "%s\n", name) + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/wait.go b/vendor/github.com/docker/docker/api/client/wait.go new file mode 100644 index 00000000..609cd3be --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/wait.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdWait blocks until a container stops, then prints its exit code. +// +// If more than one container is specified, this will wait synchronously on each container. +// +// Usage: docker wait CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdWait(args ...string) error { + cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["wait"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + status, err := cli.client.ContainerWait(context.Background(), name) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 00000000..63560c6d --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,146 @@ +package api + +import ( + "fmt" + "mime" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // Version of Current REST API + DefaultVersion version.Version = "1.23" + + // MinVersion represents Minimum REST API version supported + MinVersion version.Version = "1.12" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. 
"0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first int + last int + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) + return strings.Join(result, ", ") +} + +func formGroup(key string, start, last int) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(start) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/errors.go b/vendor/github.com/docker/docker/api/server/httputils/errors.go new file mode 100644 index 00000000..cf8d2ae6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/errors.go @@ -0,0 +1,70 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. 
+type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// WriteError decodes a specific docker error and sends it in the response. +func WriteError(w http.ResponseWriter, err error) { + if err == nil || w == nil { + logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") + return + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. + errStr := strings.ToLower(errMsg) + for keyword, status := range map[string]int{ + "not found": http.StatusNotFound, + "no such": http.StatusNotFound, + "bad parameter": http.StatusBadRequest, + "conflict": http.StatusConflict, + "impossible": http.StatusNotAcceptable, + "wrong login/password": http.StatusUnauthorized, + "unauthorized": http.StatusUnauthorized, + "hasn't been activated": http.StatusForbidden, + } { + if strings.Contains(errStr, keyword) { + statusCode = status + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + http.Error(w, errMsg, statusCode) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/form.go b/vendor/github.com/docker/docker/api/server/httputils/form.go new file mode 100644 index 00000000..20188c12 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/form.go @@ -0,0 +1,73 @@ +package httputils + +import ( + "fmt" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + if err != nil { + return value, err + } + return value, nil + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. 
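Editor's sketch: WriteError above resolves the status code by type assertion before falling back to string matching on the message. A minimal sketch of the interface-based half; `notFoundError` is a hypothetical error type used only for illustration:

```go
package main

import (
	"fmt"
	"net/http"
)

// notFoundError implements the httpStatusError contract that WriteError
// type-switches on, carrying its own HTTP status.
type notFoundError struct{ name string }

func (e notFoundError) Error() string            { return fmt.Sprintf("no such container: %s", e.name) }
func (e notFoundError) HTTPErrorStatusCode() int { return http.StatusNotFound }

// statusOf mirrors the happy path of WriteError: ask the error for its
// status, otherwise fall back to 500.
func statusOf(err error) int {
	if e, ok := err.(interface{ HTTPErrorStatusCode() int }); ok {
		return e.HTTPErrorStatusCode()
	}
	return http.StatusInternalServerError
}

func main() {
	fmt.Println(statusOf(notFoundError{"web"}))      // 404
	fmt.Println(statusOf(fmt.Errorf("disk failed"))) // 500
}
```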
+// It fails if the archive name and path are not in the request.
+func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) {
+	if err := ParseForm(r); err != nil {
+		return ArchiveOptions{}, err
+	}
+
+	name := vars["name"]
+	path := filepath.FromSlash(r.Form.Get("path"))
+
+	switch {
+	case name == "":
+		return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty")
+	case path == "":
+		return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty")
+	}
+
+	return ArchiveOptions{name, path}, nil
+}
diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils.go b/vendor/github.com/docker/docker/api/server/httputils/httputils.go
new file mode 100644
index 00000000..59ee0308
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/httputils/httputils.go
@@ -0,0 +1,107 @@
+package httputils
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/pkg/version"
+)
+
+// APIVersionKey is the client's requested API version.
+const APIVersionKey = "api-version"
+
+// UAStringKey is used as key type for user-agent string in net/context struct
+const UAStringKey = "upstream-user-agent"
+
+// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints.
+// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
+type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error
+
+// HijackConnection interrupts the http response writer to get the
+// underlying connection and operate with it.
+func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
+	conn, _, err := w.(http.Hijacker).Hijack()
+	if err != nil {
+		return nil, nil, err
+	}
+	// Flush the options to make sure the client sets the raw mode
+	conn.Write([]byte{})
+	return conn, conn, nil
+}
+
+// CloseStreams ensures that a list of http streams is properly closed.
+func CloseStreams(streams ...interface{}) {
+	for _, stream := range streams {
+		if tcpc, ok := stream.(interface {
+			CloseWrite() error
+		}); ok {
+			tcpc.CloseWrite()
+		} else if closer, ok := stream.(io.Closer); ok {
+			closer.Close()
+		}
+	}
+}
+
+// CheckForJSON makes sure that the request's Content-Type is application/json.
+func CheckForJSON(r *http.Request) error {
+	ct := r.Header.Get("Content-Type")
+
+	// No Content-Type header is ok as long as there's no Body
+	if ct == "" {
+		if r.Body == nil || r.ContentLength == 0 {
+			return nil
+		}
+	}
+
+	// Otherwise it better be json
+	if api.MatchesContentType(ct, "application/json") {
+		return nil
+	}
+	return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct)
+}
+
+// ParseForm ensures the request form is parsed even with invalid content types.
+// If we don't do this, POST method without Content-type (even with empty body) will fail.
+func ParseForm(r *http.Request) error {
+	if r == nil {
+		return nil
+	}
+	if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
+		return err
+	}
+	return nil
+}
+
+// ParseMultipartForm ensures the request form is parsed, even with invalid content types.
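Editor's sketch: BoolValue's truth table (defined in form.go above) is broader than strconv.ParseBool — anything except an empty string, "0", "no", "false", or "none" counts as true. Exercising that rule through a fake request; the query parameters are arbitrary examples:

```go
package main

import (
	"fmt"
	"net/http/httptest"
	"strings"
)

func main() {
	r := httptest.NewRequest("GET", "/build?forcerm=1&rm=false", nil)
	r.ParseForm()
	for _, k := range []string{"forcerm", "rm", "pull"} {
		// Same normalization and truth table as BoolValue.
		s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
		v := !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
		fmt.Printf("%s=%q -> %v\n", k, s, v)
	}
}
```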
+func ParseMultipartForm(r *http.Request) error {
+	if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
+		return err
+	}
+	return nil
+}
+
+// WriteJSON writes the value v to the http response stream as json with standard json encoding.
+func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(code)
+	return json.NewEncoder(w).Encode(v)
+}
+
+// VersionFromContext returns an API version from the context using APIVersionKey.
+// It panics if the context value does not have version.Version type.
+func VersionFromContext(ctx context.Context) (ver version.Version) {
+	if ctx == nil {
+		return
+	}
+	val := ctx.Value(APIVersionKey)
+	if val == nil {
+		return
+	}
+	return val.(version.Version)
+}
diff --git a/vendor/github.com/docker/docker/api/server/middleware.go b/vendor/github.com/docker/docker/api/server/middleware.go
new file mode 100644
index 00000000..2622bf1b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/middleware.go
@@ -0,0 +1,41 @@
+package server
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/api/server/middleware"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/pkg/authorization"
+)
+
+// handleWithGlobalMiddlewares wraps the handler function for a request with
+// the server's global middlewares. The order of the middlewares is backwards,
+// meaning that the first in the list will be evaluated last.
+func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc {
+	next := handler
+
+	handleVersion := middleware.NewVersionMiddleware(dockerversion.Version, api.DefaultVersion, api.MinVersion)
+	next = handleVersion(next)
+
+	if s.cfg.EnableCors {
+		handleCORS := middleware.NewCORSMiddleware(s.cfg.CorsHeaders)
+		next = handleCORS(next)
+	}
+
+	handleUserAgent := middleware.NewUserAgentMiddleware(s.cfg.Version)
+	next = handleUserAgent(next)
+
+	// Only want this on debug level
+	if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel {
+		next = middleware.DebugRequestMiddleware(next)
+	}
+
+	if len(s.cfg.AuthorizationPluginNames) > 0 {
+		s.authZPlugins = authorization.NewPlugins(s.cfg.AuthorizationPluginNames)
+		handleAuthorization := middleware.NewAuthorizationMiddleware(s.authZPlugins)
+		next = handleAuthorization(next)
+	}
+
+	return next
+}
diff --git a/vendor/github.com/docker/docker/api/server/middleware/authorization.go b/vendor/github.com/docker/docker/api/server/middleware/authorization.go
new file mode 100644
index 00000000..cbfa99e7
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/middleware/authorization.go
@@ -0,0 +1,42 @@
+package middleware
+
+import (
+	"net/http"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/pkg/authorization"
+	"golang.org/x/net/context"
+)
+
+// NewAuthorizationMiddleware creates a new Authorization middleware.
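Editor's sketch: handleWithGlobalMiddlewares builds the chain inside-out, so the first middleware wrapped (version) runs closest to the handler and the last one wrapped runs first. A generic sketch of that composition order, with print statements standing in for real middleware:

```go
package main

import "fmt"

type handler func(path string)

type middleware func(handler) handler

// chain wraps h the same way handleWithGlobalMiddlewares does: the first
// element of mws ends up innermost, i.e. it is evaluated last.
func chain(h handler, mws ...middleware) handler {
	next := h
	for _, m := range mws {
		next = m(next)
	}
	return next
}

func tag(name string) middleware {
	return func(next handler) handler {
		return func(path string) {
			fmt.Println("enter", name)
			next(path)
		}
	}
}

func main() {
	h := chain(func(path string) { fmt.Println("handle", path) },
		tag("version"), tag("cors"))
	// Prints: enter cors, enter version, handle /containers/json
	h("/containers/json")
}
```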
+func NewAuthorizationMiddleware(plugins []authorization.Plugin) Middleware { + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // FIXME: fill when authN gets in + // User and UserAuthNMethod are taken from AuthN plugins + // Currently tracked in https://github.com/docker/docker/pull/13994 + user := "" + userAuthNMethod := "" + authCtx := authorization.NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := authorization.NewResponseModifier(w) + + if err := handler(ctx, rw, r, vars); err != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + if err := authCtx.AuthZResponse(rw, r); err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/cors.go b/vendor/github.com/docker/docker/api/server/middleware/cors.go new file mode 100644 index 00000000..de21897d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/cors.go @@ -0,0 +1,33 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// NewCORSMiddleware creates a new CORS middleware. +func NewCORSMiddleware(defaultHeaders string) Middleware { + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := defaultHeaders + if corsHeaders == "" { + corsHeaders = "*" + } + + writeCorsHeaders(w, r, corsHeaders) + return handler(ctx, w, r, vars) + } + } +} + +func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/debug.go b/vendor/github.com/docker/docker/api/server/middleware/debug.go new file mode 100644 index 00000000..be7056f6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/debug.go @@ -0,0 +1,56 @@ +package middleware + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +// DebugRequestMiddleware dumps the request to logger +func DebugRequestMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize 
:= 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + if _, exists := postForm["password"]; exists { + postForm["password"] = "*****" + } + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/middleware.go b/vendor/github.com/docker/docker/api/server/middleware/middleware.go new file mode 100644 index 00000000..588331ae --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/middleware.go @@ -0,0 +1,7 @@ +package middleware + +import "github.com/docker/docker/api/server/httputils" + +// Middleware is an adapter to allow the use of ordinary functions as Docker API filters. +// Any function that has the appropriate signature can be registered as a middleware. +type Middleware func(handler httputils.APIFunc) httputils.APIFunc diff --git a/vendor/github.com/docker/docker/api/server/middleware/user_agent.go b/vendor/github.com/docker/docker/api/server/middleware/user_agent.go new file mode 100644 index 00000000..188196bf --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/user_agent.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/version" + "golang.org/x/net/context" +) + +// NewUserAgentMiddleware creates a new UserAgent middleware. 
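Editor's sketch: the UserAgent middleware that follows first strips the " (GOOS)" suffix that newer clients append to their User-Agent before comparing versions. The parse in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Split on "/", then drop the trailing " (linux)"-style suffix so only
	// the bare version remains — the same steps as the middleware.
	ua := "Docker-Client/1.11.1 (linux)"
	parts := strings.Split(ua, "/")
	if len(parts) == 2 && strings.Contains(parts[1], " ") {
		parts[1] = strings.Split(parts[1], " ")[0]
	}
	fmt.Println(parts[1]) // 1.11.1
}
```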
+func NewUserAgentMiddleware(versionCheck string) Middleware { + serverVersion := version.Version(versionCheck) + + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + ctx = context.WithValue(ctx, httputils.UAStringKey, r.Header.Get("User-Agent")) + + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + + // v1.20 onwards includes the GOOS of the client after the version + // such as Docker/1.7.0 (linux) + if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { + userAgent[1] = strings.Split(userAgent[1], " ")[0] + } + + if len(userAgent) == 2 && !serverVersion.Equal(version.Version(userAgent[1])) { + logrus.Debugf("Client and server don't have the same version (client: %s, server: %s)", userAgent[1], serverVersion) + } + } + return handler(ctx, w, r, vars) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/version.go b/vendor/github.com/docker/docker/api/server/middleware/version.go new file mode 100644 index 00000000..41d518bc --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/version.go @@ -0,0 +1,45 @@ +package middleware + +import ( + "fmt" + "net/http" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/version" + "golang.org/x/net/context" +) + +type badRequestError struct { + error +} + +func (badRequestError) HTTPErrorStatusCode() int { + return http.StatusBadRequest +} + +// NewVersionMiddleware creates a new Version middleware. +func NewVersionMiddleware(versionCheck string, defaultVersion, minVersion version.Version) Middleware { + serverVersion := version.Version(versionCheck) + + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := version.Version(vars["version"]) + if apiVersion == "" { + apiVersion = defaultVersion + } + + if apiVersion.GreaterThan(defaultVersion) { + return badRequestError{fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", apiVersion, defaultVersion)} + } + if apiVersion.LessThan(minVersion) { + return badRequestError{fmt.Errorf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, minVersion)} + } + + header := fmt.Sprintf("Docker/%s (%s)", serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion) + return handler(ctx, w, r, vars) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/profiler.go b/vendor/github.com/docker/docker/api/server/profiler.go new file mode 100644 index 00000000..3c0dfd08 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/profiler.go @@ -0,0 +1,40 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +const debugPathPrefix = "/debug/" + +func profilerSetup(mainRouter *mux.Router) { + var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as not public. +func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/backend.go b/vendor/github.com/docker/docker/api/server/router/build/backend.go new file mode 100644 index 00000000..839f3160 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/backend.go @@ -0,0 +1,20 @@ +package build + +import ( + "io" + + "github.com/docker/docker/builder" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build builds a Docker image referenced by an imageID string. + // + // Note: Tagging an image should not be done by a Builder, it should instead be done + // by the caller. 
+ // + // TODO: make this return a reference instead of string + Build(clientCtx context.Context, config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build.go b/vendor/github.com/docker/docker/api/server/router/build/build.go new file mode 100644 index 00000000..dc85d1df --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build.go @@ -0,0 +1,29 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &buildRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.NewPostRoute("/build", r.postBuild), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build_routes.go b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go new file mode 100644 index 00000000..a6b787d5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go @@ -0,0 +1,213 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.Tags = r.Form["t"] + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if 
!container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + var buildArgs = map[string]string{} + buildArgsJSON := r.FormValue("buildargs") + if buildArgsJSON != "" { + if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + var labels = map[string]string{} + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + if err := json.NewDecoder(strings.NewReader(labelsJSON)).Decode(&labels); err != nil { + return nil, err + } + options.Labels = labels + } + + return options, nil +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]types.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + notVerboseBuffer = bytes.NewBuffer(nil) + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting + // to be empty. + } + } + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + sf := streamformatter.NewJSONStreamFormatter() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(sf.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + + remoteURL := r.FormValue("remote") + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. 
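Editor's sketch: syncWriter above exists because the stdout and stderr stream formatters in postBuild share one output writer across goroutines, and unguarded concurrent Writes could interleave JSON frames. The same mutex-guarded writer as a standalone program; `lockedWriter` is an illustrative name:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

// lockedWriter serializes concurrent Writes with a mutex, mirroring syncWriter.
type lockedWriter struct {
	mu sync.Mutex
	w  io.Writer
}

func (l *lockedWriter) Write(b []byte) (int, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.w.Write(b)
}

func main() {
	w := &lockedWriter{w: os.Stdout}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			fmt.Fprintf(w, "stream %d done\n", i) // never interleaved mid-line
		}(i)
	}
	wg.Wait()
}
```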
+ createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := sf.NewProgressOutput(output, true) + if buildOptions.SuppressOutput { + progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) + } + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) + } + + var ( + context builder.ModifiableContext + dockerfileName string + out io.Writer + ) + context, dockerfileName, err = builder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader) + if err != nil { + return errf(err) + } + defer func() { + if err := context.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + if len(dockerfileName) > 0 { + buildOptions.Dockerfile = dockerfileName + } + + buildOptions.AuthConfigs = authConfigs + + out = output + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + out = &syncWriter{w: out} + stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} + stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} + + closeNotifier := make(<-chan bool) + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + imgID, err := br.backend.Build(ctx, buildOptions, + builder.DockerIgnoreContext{ModifiableContext: context}, + stdout, stderr, out, + closeNotifier) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if buildOptions.SuppressOutput { + stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + fmt.Fprintf(stdout, "%s\n", string(imgID)) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/backend.go b/vendor/github.com/docker/docker/api/server/router/container/backend.go new file mode 100644 index 00000000..bd891975 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/backend.go @@ -0,0 +1,71 @@ +package container + +import ( + "io" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. +type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. 
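Editor's sketch: the container Backend in the file that follows is assembled from narrow capability interfaces (exec, copy, state, monitor, attach) via interface embedding, so a daemon only has to satisfy each small contract. The idiom in miniature, with a hypothetical fakeDaemon and trimmed method sets:

```go
package main

import "fmt"

// Narrow capability interfaces, embedded into one umbrella interface,
// in the same style as stateBackend/monitorBackend below.
type stateBackend interface {
	ContainerPause(name string) error
	ContainerUnpause(name string) error
}

type monitorBackend interface {
	ContainerTop(name string, psArgs string) error
}

type backend interface {
	stateBackend
	monitorBackend
}

// fakeDaemon satisfies backend by implementing every embedded method.
type fakeDaemon struct{}

func (fakeDaemon) ContainerPause(name string) error   { fmt.Println("pause", name); return nil }
func (fakeDaemon) ContainerUnpause(name string) error { fmt.Println("unpause", name); return nil }
func (fakeDaemon) ContainerTop(name, psArgs string) error {
	fmt.Println("top", name, psArgs)
	return nil
}

func main() {
	var b backend = fakeDaemon{}
	_ = b.ContainerPause("web")
	_ = b.ContainerTop("web", "aux")
}
```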
+type stateBackend interface { + ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig) error + ContainerStop(name string, seconds int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) + ContainerWait(name string, timeout time.Duration) (int, error) +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version version.Version) (interface{}, error) + ContainerLogs(name string, config *backend.ContainerLogsConfig, started chan struct{}) error + ContainerStats(name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes function to implement to provide container attaching functionality. +type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. +type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container.go b/vendor/github.com/docker/docker/api/server/router/container/container.go new file mode 100644 index 00000000..873f13d2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container.go @@ -0,0 +1,63 @@ +package container + +import "github.com/docker/docker/api/server/router" + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend) router.Router { + r := &containerRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs), + router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", 
r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + // PUT + router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container_routes.go b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go new file mode 100644 index 00000000..016e00f0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go @@ -0,0 +1,531 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filter: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + var 
closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + config := &backend.ContainerStatsConfig{ + Stream: stream, + OutStream: w, + Stop: closeNotifier, + Version: string(httputils.VersionFromContext(ctx)), + } + + return s.backend.ContainerStats(vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + var closeNotifier <-chan bool + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + containerName := vars["name"] + logsConfig := &backend.ContainerLogsConfig{ + ContainerLogsOptions: types.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + }, + OutStream: w, + Stop: closeNotifier, + } + + chStarted := make(chan struct{}) + if err := s.backend.ContainerLogs(containerName, logsConfig, chStarted); err != nil { + select { + case <-chStarted: + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. 
+ fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err) + default: + return err + } + } + + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assumed chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + var hostConfig *container.HostConfig + if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := runconfig.DecodeHostConfig(r.Body) + if err != nil { + return err + } + + hostConfig = c + } + + if err := s.backend.ContainerStart(vars["name"], hostConfig); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + seconds, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +type errContainerIsRunning interface { + ContainerIsRunning() bool +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + var isStopped bool + if e, ok := err.(errContainerIsRunning); ok { + isStopped = !e.ContainerIsRunning() + } + + // Return error that's not caused because the container is stopped. + // Return error if the container is not running and the api is >= 1.20 + // to keep backwards compatibility. 
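// In practice this means a kill of an already-stopped container is
// reported as an error to clients speaking API 1.20 or newer, while
// older clients still get a 204 for backwards compatibility.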
+ version := httputils.VersionFromContext(ctx) + if version.GreaterThanOrEqualTo("1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + timeout, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.backend.ContainerRestart(vars["name"], timeout); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ + StatusCode: status, + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + warnings, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerUpdateResponse{ + 
Warnings: warnings, + }) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := version.LessThan("1.19") + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + // Force a 404 for the empty string + if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { + return fmt.Errorf("no such container: \"\"") + } + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + + keys := []byte{} + detachKeys := r.FormValue("detachKeys") + if detachKeys != "" { + keys, err = term.ToBytes(detachKeys) + if err != nil { + logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) + } + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: 
httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: keys, + MuxStreams: true, + } + + return s.backend.ContainerAttach(containerName, attachConfig) +} + +func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + var keys []byte + var err error + detachKeys := r.FormValue("detachKeys") + if detachKeys != "" { + keys, err = term.ToBytes(detachKeys) + if err != nil { + logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) + } + } + + done := make(chan struct{}) + started := make(chan struct{}) + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + wsChan := make(chan *websocket.Conn) + h := func(conn *websocket.Conn) { + wsChan <- conn + <-done + } + + srv := websocket.Server{Handler: h, Handshake: nil} + go func() { + close(started) + srv.ServeHTTP(w, r) + }() + + conn := <-wsChan + return conn, conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: keys, + UseStdin: true, + UseStdout: true, + UseStderr: true, + MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr + } + + err = s.backend.ContainerAttach(containerName, attachConfig) + close(done) + select { + case <-started: + logrus.Errorf("Error attaching websocket: %s", err) + return nil + default: + } + return err +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/copy.go b/vendor/github.com/docker/docker/api/server/router/container/copy.go new file mode 100644 index 00000000..69584b31 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/copy.go @@ -0,0 +1,112 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + if _, err := io.Copy(w, data); err != nil { + return err + } + + return nil +} + +// // Encode the stat to JSON, base64 encode, and place in a header. 
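// Illustrative counterpart, not part of the vendored tree: a client-side
// sketch that reverses setContainerPathStatHeader below. The header name
// and the base64(JSON) encoding mirror the handler; the package and
// function names here are hypothetical.
package archiveclient

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/docker/engine-api/types"
)

// decodeContainerPathStat extracts the stat that HEAD or GET
// /containers/{name}/archive places in the response headers.
func decodeContainerPathStat(header http.Header) (*types.ContainerPathStat, error) {
	encoded := header.Get("X-Docker-Container-Path-Stat")
	if encoded == "" {
		return nil, fmt.Errorf("X-Docker-Container-Path-Stat header not set")
	}
	statJSON, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}
	var stat types.ContainerPathStat
	if err := json.Unmarshal(statJSON, &stat); err != nil {
		return nil, err
	}
	return &stat, nil
}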
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/exec.go b/vendor/github.com/docker/docker/api/server/router/container/exec.go new file mode 100644 index 00000000..1a3ddc4c --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/exec.go @@ -0,0 +1,134 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + execConfig.Container = name + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
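// Hypothetical client-side sketch of the two-step flow these handlers
// implement: stream settings ride along with the create call, and
// Detach/Tty are sent again at start time, which is the duplication the
// TODO above refers to. Exact client signatures depend on the engine-api
// revision vendored here; names below are invented for illustration.
package execclient

import (
	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func runEcho(cli *client.Client, containerID string) error {
	// POST /containers/{name}/exec registers the exec instance.
	resp, err := cli.ContainerExecCreate(context.Background(), types.ExecConfig{
		Container:    containerID,
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		return err
	}
	// POST /exec/{name}/start runs it; the stream configuration is
	// effectively restated here.
	return cli.ContainerExecStart(context.Background(), resp.ID, types.ExecStartCheck{
		Detach: false,
		Tty:    false,
	})
}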
+func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if version.GreaterThan("1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in container. + if err := s.backend.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error())) + logrus.Errorf("Error running exec in container: %v\n", err) + return err + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/inspect.go b/vendor/github.com/docker/docker/api/server/router/container/inspect.go new file mode 100644 index 00000000..dbbced7e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects container's configuration and serializes it as json. 
+func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/backend.go b/vendor/github.com/docker/docker/api/server/router/image/backend.go new file mode 100644 index 00000000..dfb02a4d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/backend.go @@ -0,0 +1,44 @@ +package image + +import ( + "io" + + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/registry" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. +type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *types.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) + ImageHistory(imageName string) ([]*types.ImageHistory, error) + Images(filterArgs string, filter string, all bool) ([]*types.Image, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(newTag reference.Named, imageName string) error +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *container.Config) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, term string, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image.go b/vendor/github.com/docker/docker/api/server/router/image/image.go new file mode 100644 index 00000000..d6a1297a --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image.go @@ -0,0 +1,44 @@ +package image + +import "github.com/docker/docker/api/server/router" + +// imageRouter is a router to talk with the image controller +type imageRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend) router.Router { + r := &imageRouter{ + backend: backend, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", 
r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/create", r.postImagesCreate), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image_routes.go b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go new file mode 100644 index 00000000..58abd4b4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go @@ -0,0 +1,383 @@ +package image + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "golang.org/x/net/context" +) + +func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + pause = true + } + + c, _, _, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + if c == nil { + c = &container.Config{} + } + + newConfig, err := dockerfile.BuildFromConfig(c, r.Form["changes"]) + if err != nil { + return err + } + + commitCfg := &types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: newConfig, + MergeConfigs: true, + } + + imgID, err := s.backend.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + var ref reference.Named + ref, err = reference.ParseNamed(image) + if err == nil { + if tag != "" { + // The "tag" could actually be a digest. 
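// For example, tag="sha256:4bc45..." parses as a digest and produces a
// canonical reference, while tag="latest" fails digest parsing and falls
// through to being treated as an ordinary tag.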
+ var dgst digest.Digest + dgst, err = digest.ParseDigest(tag) + if err == nil { + ref, err = reference.WithDigest(ref, dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + } + if err == nil { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + err = s.backend.PullImage(ctx, ref, metaHeaders, authConfig, output) + } + } + } else { //import + var newRef reference.Named + if repo != "" { + var err error + newRef, err = reference.ParseNamed(repo) + if err != nil { + return err + } + + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + src := r.Form.Get("fromSrc") + + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + var newConfig *container.Config + newConfig, err = dockerfile.BuildFromConfig(&container.Config{}, r.Form["changes"]) + if err != nil { + return err + } + + err = s.backend.ImportImage(src, newRef, message, r.Body, output, newConfig) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil +} + +func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + ref, err := reference.ParseNamed(vars["name"]) + if err != nil { + return err + } + tag := r.Form.Get("tag") + if tag != "" { + // Push by digest is not supported, so only tags are supported. 
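// A digest passed in "tag" fails reference.WithTag's tag-format
// validation here, so a push-by-digest request is rejected rather than
// silently mis-tagged.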
+ ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.backend.PushImage(ctx, ref, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.backend.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + quiet := httputils.BoolValueOrDefault(r, "quiet", true) + + if !quiet { + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil + } + return s.backend.LoadImage(r.Body, w, quiet) +} + +func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.backend.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.backend.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // FIXME: The filter parameter could just be a match filter + images, err := s.backend.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.backend.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + repo := r.Form.Get("repo") + tag := 
r.Form.Get("tag") + newTag, err := reference.WithName(repo) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(newTag, tag); err != nil { + return err + } + } + if err := s.backend.TagImage(newTag, vars["name"]); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("term"), config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local.go b/vendor/github.com/docker/docker/api/server/router/local.go new file mode 100644 index 00000000..99db4242 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local.go @@ -0,0 +1,61 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// localRoute defines an individual API route to connect +// with the docker daemon. It implements Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc) Route { + return localRoute{method, path, handler} +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("GET", path, handler) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("POST", path, handler) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("PUT", path, handler) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("DELETE", path, handler) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS. +func NewOptionsRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("OPTIONS", path, handler) +} + +// NewHeadRoute initializes a new route with the http method HEAD. 
+func NewHeadRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("HEAD", path, handler) +} diff --git a/vendor/github.com/docker/docker/api/server/router/router.go b/vendor/github.com/docker/docker/api/server/router/router.go new file mode 100644 index 00000000..2de25c27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/router.go @@ -0,0 +1,19 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + // Routes returns the list of routes to add to the docker server. + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath where the route responds to. + Path() string +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/backend.go b/vendor/github.com/docker/docker/api/server/router/system/backend.go new file mode 100644 index 00000000..e6284cd4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/backend.go @@ -0,0 +1,18 @@ +package system + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// Backend is the methods that need to be implemented to provide +// system specific functionality. +type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SubscribeToEvents(since, sinceNano int64, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system.go b/vendor/github.com/docker/docker/api/server/router/system/system.go new file mode 100644 index 00000000..76da5c52 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system.go @@ -0,0 +1,33 @@ +package system + +import "github.com/docker/docker/api/server/router" + +// systemRouter provides information about the Docker system overall. +// It gathers information about host, daemon and container events. 
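// A minimal sketch of the pattern the systemRouter below (and the
// container and image routers above) instantiates: implement
// router.Router by returning routes built with the New*Route helpers.
// The "ping2" route and all names here are invented for illustration.
package routerexample

import (
	"net/http"

	"github.com/docker/docker/api/server/router"
	"golang.org/x/net/context"
)

type pingRouter struct {
	routes []router.Route
}

// NewPingRouter wires a single GET route into a router the server can mount.
func NewPingRouter() router.Router {
	r := &pingRouter{}
	r.routes = []router.Route{
		router.NewGetRoute("/ping2", r.ping),
	}
	return r
}

// Routes returns the available routes.
func (r *pingRouter) Routes() []router.Route {
	return r.routes
}

// ping matches the httputils.APIFunc signature expected by NewGetRoute.
func (r *pingRouter) ping(ctx context.Context, w http.ResponseWriter, req *http.Request, vars map[string]string) error {
	_, err := w.Write([]byte("pong"))
	return err
}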
+type systemRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new system router +func NewRouter(b Backend) router.Router { + r := &systemRouter{ + backend: b, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.NewGetRoute("/events", r.getEvents), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system_routes.go b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go new file mode 100644 index 00000000..defaa0d6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go @@ -0,0 +1,125 @@ +package system + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + timetypes "github.com/docker/engine-api/types/time" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion.String() + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + since, sinceNano, err := timetypes.ParseTimestamps(r.Form.Get("since"), -1) + if err != nil { + return err + } + until, untilNano, err := timetypes.ParseTimestamps(r.Form.Get("until"), -1) + if err != nil { + return err + } + + var timeout <-chan time.Time + if until > 0 || untilNano > 0 { + dur := time.Unix(until, untilNano).Sub(time.Now()) + timeout = time.NewTimer(dur).C + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + defer output.Close() + output.Flush() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, sinceNano, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + var closeNotify <-chan bool + if closeNotifier, ok := w.(http.CloseNotifier); ok { + closeNotify = closeNotifier.CloseNotify() + } + + for { + select { + case ev := <-l: + jev, ok := 
ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timeout: + return nil + case <-closeNotify: + logrus.Debug("Client disconnected, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, token, err := s.backend.AuthenticateToRegistry(ctx, config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ + Status: status, + IdentityToken: token, + }) +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/backend.go b/vendor/github.com/docker/docker/api/server/router/volume/backend.go new file mode 100644 index 00000000..fbf5ed27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/backend.go @@ -0,0 +1,15 @@ +package volume + +import ( + // TODO return types need to be refactored into pkg + "github.com/docker/engine-api/types" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string) error +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume.go b/vendor/github.com/docker/docker/api/server/router/volume/volume.go new file mode 100644 index 00000000..2683dcec --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume.go @@ -0,0 +1,35 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go new file mode 100644 index 00000000..5aa0d4a7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go @@ -0,0 +1,66 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return 
httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes, Warnings: warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req types.VolumeCreateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusCreated, volume) +} + +func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := v.backend.VolumeRm(vars["name"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router_swapper.go b/vendor/github.com/docker/docker/api/server/router_swapper.go new file mode 100644 index 00000000..1ecc7a7f --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router_swapper.go @@ -0,0 +1,30 @@ +package server + +import ( + "net/http" + "sync" + + "github.com/gorilla/mux" +) + +// routerSwapper is an http.Handler that allows you to swap +// mux routers. +type routerSwapper struct { + mu sync.Mutex + router *mux.Router +} + +// Swap changes the old router with the new one. +func (rs *routerSwapper) Swap(newRouter *mux.Router) { + rs.mu.Lock() + rs.router = newRouter + rs.mu.Unlock() +} + +// ServeHTTP makes the routerSwapper to implement the http.Handler interface. +func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rs.mu.Lock() + router := rs.router + rs.mu.Unlock() + router.ServeHTTP(w, r) +} diff --git a/vendor/github.com/docker/docker/api/server/server.go b/vendor/github.com/docker/docker/api/server/server.go new file mode 100644 index 00000000..1379b737 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/server.go @@ -0,0 +1,195 @@ +package server + +import ( + "crypto/tls" + "net" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/pkg/authorization" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// versionMatcher defines a variable matcher to be parsed by the router +// when a request is about to be served. +const versionMatcher = "/v{version:[0-9.]+}" + +// Config provides the configuration for the API server +type Config struct { + Logging bool + EnableCors bool + CorsHeaders string + AuthorizationPluginNames []string + Version string + SocketGroup string + TLSConfig *tls.Config +} + +// Server contains instance details for the server +type Server struct { + cfg *Config + servers []*HTTPServer + routers []router.Router + authZPlugins []authorization.Plugin + routerSwapper *routerSwapper +} + +// New returns a new instance of the server based on the specified configuration. 
+// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). +func New(cfg *Config) *Server { + return &Server{ + cfg: cfg, + } +} + +// Accept sets a listener the server accepts connections into. +func (s *Server) Accept(addr string, listeners ...net.Listener) { + for _, listener := range listeners { + httpServer := &HTTPServer{ + srv: &http.Server{ + Addr: addr, + }, + l: listener, + } + s.servers = append(s.servers, httpServer) + } +} + +// Close closes servers and thus stop receiving requests +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +// serveAPI loops through all initialized servers and spawns goroutine +// with Server method for each. It sets createMux() as Handler also. +func (s *Server) serveAPI() error { + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + srv.srv.Handler = s.routerSwapper + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) + } + + for i := 0; i < len(s.servers); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create a http server and a mux router with all api end points. +// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. +func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close closes the HTTPServer from listening for the inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Define the context that we'll pass around to share info + // like the docker-request-id. + // + // The 'context' will be used for global data that should + // apply to all requests. Data that is specific to the + // immediate function being called should still be passed + // as 'args' on the function call. + ctx := context.Background() + handlerFunc := s.handleWithGlobalMiddlewares(handler) + + vars := mux.Vars(r) + if vars == nil { + vars = make(map[string]string) + } + + if err := handlerFunc(ctx, w, r, vars); err != nil { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + httputils.WriteError(w, err) + } + } +} + +// InitRouter initializes the list of routers for the server. +// This method also enables the Go profiler if enableProfiler is true. +func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { + for _, r := range routers { + s.routers = append(s.routers, r) + } + + m := s.createMux() + if enableProfiler { + profilerSetup(m) + } + s.routerSwapper = &routerSwapper{ + router: m, + } +} + +// createMux initializes the main router the server uses. 
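// Sketch of what the dual registration in createMux below produces: each
// route is mounted both under versionMatcher and bare, so, for example,
// GET /v1.23/containers/json and GET /containers/json reach the same
// handler. The handler and paths here are illustrative stand-ins.
package muxexample

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func exampleMux() *mux.Router {
	m := mux.NewRouter()
	list := func(w http.ResponseWriter, r *http.Request) {
		// For the unversioned path, the "version" variable is simply absent.
		fmt.Fprintf(w, "version=%q\n", mux.Vars(r)["version"])
	}
	const versionMatcher = "/v{version:[0-9.]+}"
	m.Path(versionMatcher + "/containers/json").Methods("GET").HandlerFunc(list)
	m.Path("/containers/json").Methods("GET").HandlerFunc(list)
	return m
}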
+func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debugf("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. +func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +// DisableProfiler reloads the server mux without adding the profiler routes. +func (s *Server) DisableProfiler() { + s.routerSwapper.Swap(s.createMux()) +} + +// EnableProfiler reloads the server mux adding the profiler routes. +func (s *Server) EnableProfiler() { + m := s.createMux() + profilerSetup(m) + s.routerSwapper.Swap(m) +} diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go index c7b4f017..ffe9b709 100644 --- a/vendor/github.com/docker/docker/api/types/backend/backend.go +++ b/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -6,7 +6,6 @@ package backend import ( "io" - "github.com/docker/docker/pkg/streamformatter" "github.com/docker/engine-api/types" ) @@ -18,7 +17,7 @@ type ContainerAttachConfig struct { UseStderr bool Logs bool Stream bool - DetachKeys string + DetachKeys []byte // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/sderr messages accordingly. // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... @@ -32,6 +31,7 @@ type ContainerAttachConfig struct { type ContainerLogsConfig struct { types.ContainerLogsOptions OutStream io.Writer + Stop <-chan bool } // ContainerStatsConfig holds information for configuring the runtime @@ -39,6 +39,7 @@ type ContainerLogsConfig struct { type ContainerStatsConfig struct { Stream bool OutStream io.Writer + Stop <-chan bool Version string } @@ -66,20 +67,3 @@ type ExecProcessConfig struct { Privileged *bool `json:"privileged,omitempty"` User string `json:"user,omitempty"` } - -// ContainerCommitConfig is a wrapper around -// types.ContainerCommitConfig that also -// transports configuration changes for a container. -type ContainerCommitConfig struct { - types.ContainerCommitConfig - Changes []string -} - -// ProgressWriter is an interface -// to transport progress streams. 
-type ProgressWriter struct { - Output io.Writer - StdoutFormatter *streamformatter.StdoutFormatter - StderrFormatter *streamformatter.StderrFormatter - ProgressReaderFunc func(io.ReadCloser) io.ReadCloser -} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go index 65e7f023..6f8fdb8d 100644 --- a/vendor/github.com/docker/docker/builder/builder.go +++ b/vendor/github.com/docker/docker/builder/builder.go @@ -9,8 +9,6 @@ import ( "os" "time" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/image" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" "github.com/docker/engine-api/types/container" @@ -107,27 +105,27 @@ func (fi *HashedFileInfo) SetHash(h string) { type Backend interface { // TODO: use digest reference instead of name - // GetImageOnBuild looks up a Docker image referenced by `name`. + // GetImage looks up a Docker image referenced by `name`. GetImageOnBuild(name string) (Image, error) - // TagImage tags an image with newTag - TagImageWithReference(image.ID, reference.Named) error - // PullOnBuild tells Docker to pull image referenced by `name`. + // Tag an image with newTag + TagImage(newTag reference.Named, imageName string) error + // Pull tells Docker to pull image referenced by `name`. PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) - // ContainerAttachRaw attaches to container. + // ContainerAttach attaches to container. ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error // ContainerCreate creates a new Docker container and returns potential warnings ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) // ContainerRm removes a container specified by `id`. ContainerRm(name string, config *types.ContainerRmConfig) error // Commit creates a new Docker image from an existing Docker container. - Commit(string, *backend.ContainerCommitConfig) (string, error) - // ContainerKill stops the container execution abruptly. + Commit(string, *types.ContainerCommitConfig) (string, error) + // Kill stops the container execution abruptly. ContainerKill(containerID string, sig uint64) error - // ContainerStart starts a new container + // Start starts a new container ContainerStart(containerID string, hostConfig *container.HostConfig) error // ContainerWait stops processing until the given container is stopped. ContainerWait(containerID string, timeout time.Duration) (int, error) - // ContainerUpdateCmdOnBuild updates container.Path and container.Args + // ContainerUpdateCmd updates container.Path and container.Args ContainerUpdateCmdOnBuild(containerID string, cmd []string) error // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container diff --git a/vendor/github.com/docker/docker/builder/context.go b/vendor/github.com/docker/docker/builder/context.go index 1b6e9d04..708f9738 100644 --- a/vendor/github.com/docker/docker/builder/context.go +++ b/vendor/github.com/docker/docker/builder/context.go @@ -6,7 +6,7 @@ import ( "io" "io/ioutil" "os" - "os/exec" + "github.com/docker/containerd/subreaper/exec" "path/filepath" "runtime" "strings" @@ -174,7 +174,7 @@ func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, rel // the dockerfile in that context directory, and a non-nil error on success. 
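// Hypothetical condensed sketch of the normalization getDockerfileRelPath
// below performs: make the context directory absolute and symlink-free,
// then express the Dockerfile path relative to it. Error wrapping and the
// extra validation of the real function are omitted.
package pathexample

import "path/filepath"

func relDockerfile(contextDir, dockerfile string) (absContext, rel string, err error) {
	if absContext, err = filepath.Abs(contextDir); err != nil {
		return "", "", err
	}
	if absContext, err = filepath.EvalSymlinks(absContext); err != nil {
		return "", "", err
	}
	if !filepath.IsAbs(dockerfile) {
		dockerfile = filepath.Join(absContext, dockerfile)
	}
	if rel, err = filepath.Rel(absContext, dockerfile); err != nil {
		return "", "", err
	}
	return absContext, rel, nil
}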
func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { if absContextDir, err = filepath.Abs(givenContextDir); err != nil { - return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) + return "", "", fmt.Errorf("unable to get absolute context directory: %v", err) } // The context dir might be a symbolic link, so follow it to the actual diff --git a/vendor/github.com/docker/docker/builder/context_windows.go b/vendor/github.com/docker/docker/builder/context_windows.go deleted file mode 100644 index b8ba2ba2..00000000 --- a/vendor/github.com/docker/docker/builder/context_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build windows - -package builder - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/longpath" -) - -func getContextRoot(srcPath string) (string, error) { - cr, err := filepath.Abs(srcPath) - if err != nil { - return "", err - } - return longpath.AddPrefix(cr), nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/bflag.go b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go new file mode 100644 index 00000000..c2e6c7da --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go @@ -0,0 +1,176 @@ +package dockerfile + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags return the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. 
+// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + default: + panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!")) + } + + } + + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go new file mode 100644 index 00000000..daa70498 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -0,0 +1,326 @@ +package dockerfile + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "golang.org/x/net/context" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// BuiltinAllowedBuildArgs is list of built-in allowed build args +var BuiltinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. +type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Output io.Writer + + docker builder.Backend + context builder.Context + clientCtx context.Context + + dockerfile *parser.Node + runConfig *container.Config // runconfig for cmd, run, entrypoint etc. 
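The Builder struct continues below. First, a usage sketch for the BFlags helper completed above: flags are declared up front, the raw "--name=value" words from the parser go into Args, and all declaration errors surface on Parse. The --keep and --from flags are invented for illustration; they are not real builder flags.

package dockerfile

import "fmt"

func exampleBFlags() error {
	bf := NewBFlags()
	keep := bf.AddBool("keep", false)
	from := bf.AddString("from", "")

	bf.Args = []string{"--keep", "--from=builder"} // raw words from the parser
	if err := bf.Parse(); err != nil {
		return err // unknown or duplicate flags surface here, not at AddXXX time
	}
	fmt.Println(keep.IsTrue(), from.IsUsed(), from.Value) // true true builder
	return nil
}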
+ flags *BFlags + tmpContainers map[string]struct{} + image string // imageID + noBaseImage bool + maintainer string + cmdSet bool + disableCommit bool + cacheBusted bool + cancelled chan struct{} + cancelOnce sync.Once + allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. + + // TODO: remove once docker.Commit can receive a tag + id string +} + +// BuildManager implements builder.Backend and is shared across all Builder objects. +type BuildManager struct { + backend builder.Backend +} + +// NewBuildManager creates a BuildManager. +func NewBuildManager(b builder.Backend) (bm *BuildManager) { + return &BuildManager{backend: b} +} + +// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. +// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, +// will be read from the Context passed to Build(). +func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, context builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { + if config == nil { + config = new(types.ImageBuildOptions) + } + if config.BuildArgs == nil { + config.BuildArgs = make(map[string]string) + } + b = &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: os.Stdout, + Stderr: os.Stderr, + docker: backend, + context: context, + runConfig: new(container.Config), + tmpContainers: map[string]struct{}{}, + cancelled: make(chan struct{}), + id: stringid.GenerateNonCryptoID(), + allowedBuildArgs: make(map[string]bool), + } + if dockerfile != nil { + b.dockerfile, err = parser.Parse(dockerfile) + if err != nil { + return nil, err + } + } + + return b, nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNamed(repo) + if err != nil { + return nil, err + } + + ref = reference.WithDefaultTag(ref) + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + if _, isTagged := ref.(reference.NamedTagged); !isTagged { + ref, err = reference.WithTag(ref, reference.DefaultTag) + if err != nil { + return nil, err + } + } + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} + +// Build creates a NewBuilder, which builds the image. +func (bm *BuildManager) Build(clientCtx context.Context, config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) { + b, err := NewBuilder(clientCtx, config, bm.backend, context, nil) + if err != nil { + return "", err + } + img, err := b.build(config, context, stdout, stderr, out, clientGone) + return img, err + +} + +// build runs the Dockerfile builder from a context and a docker object that allows to make calls +// to Docker. 
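The build() walkthrough continues below. For the tag handling above, a short sketch of what sanitizeRepoAndTags produces for a typical -t list; the exact String() rendering depends on the vendored reference package:

package dockerfile

import "fmt"

func exampleSanitize() {
	// Empty names are skipped, bare repos gain the default tag, duplicates
	// collapse to one entry, and digest references are rejected with an error.
	refs, err := sanitizeRepoAndTags([]string{"myapp", "myapp:v1", "myapp:v1", ""})
	if err != nil {
		panic(err)
	}
	for _, ref := range refs {
		fmt.Println(ref.String()) // myapp:latest, then myapp:v1 (once)
	}
}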
+// +// This will (barring errors): +// +// * read the dockerfile from context +// * parse the dockerfile if not already parsed +// * walk the AST and execute it by dispatching to handlers. If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing. +// * Tag image, if applicable. +// * Print a happy message and return the image ID. +// +func (b *Builder) build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) { + b.options = config + b.context = context + b.Stdout = stdout + b.Stderr = stderr + b.Output = out + + // If Dockerfile was not parsed yet, extract it from the Context + if b.dockerfile == nil { + if err := b.readDockerfile(); err != nil { + return "", err + } + } + + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-finished: + case <-clientGone: + b.cancelOnce.Do(func() { + close(b.cancelled) + }) + } + + }() + + repoAndTags, err := sanitizeRepoAndTags(config.Tags) + if err != nil { + return "", err + } + + if len(b.options.Labels) > 0 { + line := "LABEL " + for k, v := range b.options.Labels { + line += fmt.Sprintf("%q=%q ", k, v) + } + _, node, err := parser.ParseLine(line) + if err != nil { + return "", err + } + b.dockerfile.Children = append(b.dockerfile.Children, node) + } + + var shortImgID string + for i, n := range b.dockerfile.Children { + select { + case <-b.cancelled: + logrus.Debug("Builder: build cancelled!") + fmt.Fprintf(b.Stdout, "Build cancelled") + return "", fmt.Errorf("Build cancelled") + default: + // Not cancelled yet, keep going... + } + if err := b.dispatch(i, n); err != nil { + if b.options.ForceRemove { + b.clearTmp() + } + return "", err + } + + shortImgID = stringid.TruncateID(b.image) + fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) + if b.options.Remove { + b.clearTmp() + } + } + + // check if there are any leftover build-args that were passed but not + // consumed during build. Return an error, if there are any. + leftoverArgs := []string{} + for arg := range b.options.BuildArgs { + if !b.isBuildArgAllowed(arg) { + leftoverArgs = append(leftoverArgs, arg) + } + } + if len(leftoverArgs) > 0 { + return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs) + } + + if b.image == "" { + return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") + } + + for _, rt := range repoAndTags { + if err := b.docker.TagImage(rt, b.image); err != nil { + return "", err + } + } + + fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) + return b.image, nil +} + +// Cancel cancels an ongoing Dockerfile build. +func (b *Builder) Cancel() { + b.cancelOnce.Do(func() { + close(b.cancelled) + }) +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? 
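BuildFromConfig follows below. The cancellation wiring that build() and Cancel() share above reduces to a channel that is closed at most once, guarded by sync.Once, and polled between steps. A minimal runnable sketch with invented job and step names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type job struct {
	cancelled  chan struct{}
	cancelOnce sync.Once
}

// Cancel may be called from any goroutine, any number of times.
func (j *job) Cancel() {
	j.cancelOnce.Do(func() { close(j.cancelled) })
}

func (j *job) run(steps int) error {
	for i := 0; i < steps; i++ {
		select {
		case <-j.cancelled:
			return fmt.Errorf("Build cancelled")
		default: // not cancelled yet, keep going
		}
		time.Sleep(10 * time.Millisecond) // stand-in for dispatching one step
	}
	return nil
}

func main() {
	j := &job{cancelled: make(chan struct{})}
	go func() { time.Sleep(25 * time.Millisecond); j.Cancel() }()
	fmt.Println(j.run(10)) // Build cancelled
}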
+func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) + if err != nil { + return nil, err + } + + // ensure that the commands are valid + for _, n := range ast.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b, err := NewBuilder(context.Background(), nil, nil, nil, nil) + if err != nil { + return nil, err + } + b.runConfig = config + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + for i, n := range ast.Children { + if err := b.dispatch(i, n); err != nil { + return nil, err + } + } + + return b.runConfig, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go new file mode 100644 index 00000000..9e1b799d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go @@ -0,0 +1,42 @@ +// Package command contains the set of Dockerfile commands. +package command + +// Define constants for the command strings +const ( + Env = "env" + Label = "label" + Maintainer = "maintainer" + Add = "add" + Copy = "copy" + From = "from" + Onbuild = "onbuild" + Workdir = "workdir" + Run = "run" + Cmd = "cmd" + Entrypoint = "entrypoint" + Expose = "expose" + Volume = "volume" + User = "user" + StopSignal = "stopsignal" + Arg = "arg" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Env: {}, + Label: {}, + Maintainer: {}, + Add: {}, + Copy: {}, + From: {}, + Onbuild: {}, + Workdir: {}, + Run: {}, + Cmd: {}, + Entrypoint: {}, + Expose: {}, + Volume: {}, + User: {}, + StopSignal: {}, + Arg: {}, +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go new file mode 100644 index 00000000..ac7c2b07 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -0,0 +1,639 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func env(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // TODO/FIXME/NOT USED + // Just here to show how to use the builder flags stuff within the + // context of a builder command. 
Will remove once we actually add + // a builder command to something! + /* + flBool1 := b.flags.AddBool("bool1", false) + flStr1 := b.flags.AddString("str1", "HI") + + if err := b.flags.Parse(); err != nil { + return err + } + + fmt.Printf("Bool1:%v\n", flBool1) + fmt.Printf("Str1:%v\n", flStr1) + */ + + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + b.runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + b.runConfig.Env = append(b.runConfig.Env, newVar) + } + j++ + } + + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.maintainer = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + + if b.runConfig.Labels == nil { + b.runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + b.runConfig.Labels[args[j]] = args[j+1] + j++ + } + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastOneArgument("ADD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, true, true, "ADD") +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastOneArgument("COPY") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, false, false, "COPY") +} + +// FROM imagename +// +// This sets the image the dockerfile will build on top of. +// +func from(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("FROM") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + name := args[0] + + var ( + image builder.Image + err error + ) + + // Windows cannot support a container with no base image. 
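The FROM handling resumes below. Stepping back to the env dispatcher above, its update rule for runConfig.Env is: overwrite the entry whose key matches, otherwise append. A standalone sketch:

package main

import (
	"fmt"
	"strings"
)

// setEnv mirrors the loop in env(): replace an existing KEY=VALUE entry
// in place, or append a new one.
func setEnv(env []string, key, value string) []string {
	newVar := key + "=" + value
	for i, kv := range env {
		if strings.SplitN(kv, "=", 2)[0] == key {
			env[i] = newVar
			return env
		}
	}
	return append(env, newVar)
}

func main() {
	env := []string{"PATH=/usr/bin"}
	env = setEnv(env, "FOO", "bar")
	env = setEnv(env, "PATH", "/usr/local/bin:/usr/bin")
	fmt.Println(env) // [PATH=/usr/local/bin:/usr/bin FOO=bar]
}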
+ if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + return fmt.Errorf("Windows does not support FROM scratch") + } + b.image = "" + b.noBaseImage = true + } else { + // TODO: don't use `name`, instead resolve it to a digest + if !b.options.PullParent { + image, err = b.docker.GetImageOnBuild(name) + // TODO: shouldn't we error out if error is different from "not found" ? + } + if image == nil { + image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) + if err != nil { + return err + } + } + } + + return b.processImageFrom(image) +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. +// +func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. + workdir := filepath.FromSlash(args[0]) + + if !system.IsAbs(workdir) { + current := filepath.FromSlash(b.runConfig.WorkingDir) + workdir = filepath.Join(string(os.PathSeparator), current, workdir) + } + + b.runConfig.WorkingDir = workdir + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is +// only one argument. The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *Builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + args = handleJSONArgs(args, attributes) + + if !attributes["json"] { + if runtime.GOOS != "windows" { + args = append([]string{"/bin/sh", "-c"}, args...) + } else { + args = append([]string{"cmd", "/S", "/C"}, args...) 
+ } + } + + config := &container.Config{ + Cmd: strslice.StrSlice(args), + Image: b.image, + } + + // stash the cmd + cmd := b.runConfig.Cmd + if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { + b.runConfig.Cmd = config.Cmd + } + + // stash the config environment + env := b.runConfig.Env + + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + defer func(env []string) { b.runConfig.Env = env }(env) + + // derive the net build-time environment for this run. We let config + // environment override the build time environment. + // This means that we take the b.buildArgs list of env vars and remove + // any of those variables that are defined as part of the container. In other + // words, anything in b.Config.Env. What's left is the list of build-time env + // vars that we need to add to each RUN command - note the list could be empty. + // + // We don't persist the build time environment with container's config + // environment, but just sort and prepend it to the command string at time + // of commit. + // This helps with tracing back the image's actual environment at the time + // of RUN, without leaking it to the final image. It also aids cache + // lookup for same image built with same build time environment. + cmdBuildEnv := []string{} + configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + if _, ok := configEnv[key]; !ok { + cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val)) + } + } + + // derive the command to use for probeCache() and to commit in this container. + // Note that we only do this if there are any build-time env vars. Also, we + // use the special argument "|#" at the start of the args array. This will + // avoid conflicts with any RUN command since commands can not + // start with | (vertical bar). The "#" (number of build envs) is there to + // help ensure proper cache matches. We don't want a RUN command + // that starts with "foo=abc" to be considered part of a build-time env var. + saveCmd := config.Cmd + if len(cmdBuildEnv) > 0 { + sort.Strings(cmdBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) + saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) + } + + b.runConfig.Cmd = saveCmd + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + // set Cmd manually, this is special case only for Dockerfiles + b.runConfig.Cmd = config.Cmd + // set build-time environment for 'run'. + b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) + + cID, err := b.create() + if err != nil { + return err + } + + if err := b.run(cID); err != nil { + return err + } + + // revert to original config environment and set the command string to + // have the build-time env vars in it (if any) so that future cache look-ups + // properly match it. 
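The run dispatcher resumes below. The "|N" prefix described above is worth seeing in isolation: build-time env pairs are sorted and prepended to the command before probing the cache, so the same RUN line built with different --build-arg values can never hit the same cache entry. A sketch:

package main

import (
	"fmt"
	"sort"
)

// cacheCmd builds the cache-probe command: "|N" plus the N sorted
// build-time env pairs, then the real command.
func cacheCmd(cmd []string, buildEnv []string) []string {
	if len(buildEnv) == 0 {
		return cmd
	}
	sort.Strings(buildEnv) // deterministic order gives a stable cache key
	key := append([]string{fmt.Sprintf("|%d", len(buildEnv))}, buildEnv...)
	return append(key, cmd...)
}

func main() {
	fmt.Println(cacheCmd([]string{"/bin/sh", "-c", "make"}, []string{"HTTP_PROXY=x"}))
	// [|1 HTTP_PROXY=x /bin/sh -c make]
}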
+ b.runConfig.Env = env + b.runConfig.Cmd = saveCmd + return b.commit(cID, cmd, "run") +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + cmdSlice := handleJSONArgs(args, attributes) + + if !attributes["json"] { + if runtime.GOOS != "windows" { + cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...) + } else { + cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...) + } + } + + b.runConfig.Cmd = strslice.StrSlice(cmdSlice) + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to +// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx. +// +// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. +// +func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + parsed := handleJSONArgs(args, attributes) + + switch { + case attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + b.runConfig.Entrypoint = strslice.StrSlice(parsed) + case len(parsed) == 0: + // ENTRYPOINT [] + b.runConfig.Entrypoint = nil + default: + // ENTRYPOINT echo hi + if runtime.GOOS != "windows" { + b.runConfig.Entrypoint = strslice.StrSlice{"/bin/sh", "-c", parsed[0]} + } else { + b.runConfig.Entrypoint = strslice.StrSlice{"cmd", "/S", "/C", parsed[0]} + } + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !b.cmdSet { + b.runConfig.Cmd = nil + } + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { + return err + } + + return nil +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.runConfig.ExposedPorts for runconfig. +// +func expose(b *Builder, args []string, attributes map[string]bool, original string) error { + portsTab := args + + if len(args) == 0 { + return errAtLeastOneArgument("EXPOSE") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.ExposedPorts == nil { + b.runConfig.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := b.runConfig.ExposedPorts[port]; !exists { + b.runConfig.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. 
+// +func user(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("USER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.runConfig.User = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("VOLUME") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.Volumes == nil { + b.runConfig.Volumes = map[string]struct{}{} + } + for _, v := range args { + v = strings.TrimSpace(v) + if v == "" { + return fmt.Errorf("Volume specified can not be an empty string") + } + b.runConfig.Volumes[v] = struct{}{} + } + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { + return err + } + return nil +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. +func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("STOPSIGNAL requires exactly one argument") + } + + sig := args[0] + _, err := signal.ParseSignal(sig) + if err != nil { + return err + } + + b.runConfig.StopSignal = sig + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. +// Dockerfile author may optionally set a default value of this variable. +func arg(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("ARG requires exactly one argument definition") + } + + var ( + name string + value string + hasDefault bool + ) + + arg := args[0] + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + name = parts[0] + value = parts[1] + hasDefault = true + } else { + name = arg + hasDefault = false + } + // add the arg to allowed list of build-time args from this step on. + b.allowedBuildArgs[name] = true + + // If there is a default value associated with this arg then add it to the + // b.buildArgs if one is not already passed to the builder. The args passed + // to builder override the default value of 'arg'. 
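The ARG dispatcher resumes below. Its name[=value] split, sketched on its own:

package main

import (
	"fmt"
	"strings"
)

// parseArg accepts either a bare name or a name=value pair, exactly as
// the dispatcher above does.
func parseArg(arg string) (name, value string, hasDefault bool) {
	if i := strings.Index(arg, "="); i >= 0 {
		return arg[:i], arg[i+1:], true
	}
	return arg, "", false
}

func main() {
	fmt.Println(parseArg("VERSION=1.2")) // VERSION 1.2 true
	fmt.Println(parseArg("VERSION"))     // VERSION  false
}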
+ if _, ok := b.options.BuildArgs[name]; !ok && hasDefault { + b.options.BuildArgs[name] = value + } + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) +} + +func errAtLeastOneArgument(command string) error { + return fmt.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return fmt.Errorf("%s requires exactly one argument", command) +} + +func errTooManyArguments(command string) error { + return fmt.Errorf("Bad input to %s, too many arguments", command) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/envVarTest b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest new file mode 100644 index 00000000..1a7fe975 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest @@ -0,0 +1,112 @@ +hello | hello +he'll'o | hello +he'llo | hello +he\'llo | he'llo +he\\'llo | he\llo +abc\tdef | abctdef +"abc\tdef" | abc\tdef +'abc\tdef' | abc\tdef +hello\ | hello +hello\\ | hello\ +"hello | hello +"hello\" | hello" +"hel'lo" | hel'lo +'hello | hello +'hello\' | hello\ +"''" | '' +$. | $. +$1 | +he$1x | hex +he$.x | he$.x +he$pwd. | he. +he$PWD | he/home +he\$PWD | he$PWD +he\\$PWD | he\/home +he\${} | he${} +he\${}xx | he${}xx +he${} | he +he${}xx | hexx +he${hi} | he +he${hi}xx | hexx +he${PWD} | he/home +he${.} | error +he${XXX:-000}xx | he000xx +he${PWD:-000}xx | he/homexx +he${XXX:-$PWD}xx | he/homexx +he${XXX:-${PWD:-yyy}}xx | he/homexx +he${XXX:-${YYY:-yyy}}xx | heyyyxx +he${XXX:YYY} | error +he${XXX:+${PWD}}xx | hexx +he${PWD:+${XXX}}xx | hexx +he${PWD:+${SHELL}}xx | hebashxx +he${XXX:+000}xx | hexx +he${PWD:+000}xx | he000xx +'he${XX}' | he${XX} +"he${PWD}" | he/home +"he'$PWD'" | he'/home' +"$PWD" | /home +'$PWD' | $PWD +'\$PWD' | \$PWD +'"hello"' | "hello" +he\$PWD | he$PWD +"he\$PWD" | he$PWD +'he\$PWD' | he\$PWD +he${PWD | error +he${PWD:=000}xx | error +he${PWD:+${PWD}:}xx | he/home:xx +he${XXX:-\$PWD:}xx | he$PWD:xx +he${XXX:-\${PWD}z}xx | he${PWDz}xx +안녕하세요 | 안녕하세요 +안'녕'하세요 | 안녕하세요 +안'녕하세요 | 안녕하세요 +안녕\'하세요 | 안녕'하세요 +안\\'녕하세요 | 안\녕하세요 +안녕\t하세요 | 안녕t하세요 +"안녕\t하세요" | 안녕\t하세요 +'안녕\t하세요 | 안녕\t하세요 +안녕하세요\ | 안녕하세요 +안녕하세요\\ | 안녕하세요\ +"안녕하세요 | 안녕하세요 +"안녕하세요\" | 안녕하세요" +"안녕'하세요" | 안녕'하세요 +'안녕하세요 | 안녕하세요 +'안녕하세요\' | 안녕하세요\ +안녕$1x | 안녕x +안녕$.x | 안녕$.x +안녕$pwd. | 안녕. +안녕$PWD | 안녕/home +안녕\$PWD | 안녕$PWD +안녕\\$PWD | 안녕\/home +안녕\${} | 안녕${} +안녕\${}xx | 안녕${}xx +안녕${} | 안녕 +안녕${}xx | 안녕xx +안녕${hi} | 안녕 +안녕${hi}xx | 안녕xx +안녕${PWD} | 안녕/home +안녕${.} | error +안녕${XXX:-000}xx | 안녕000xx +안녕${PWD:-000}xx | 안녕/homexx +안녕${XXX:-$PWD}xx | 안녕/homexx +안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +안녕${XXX:YYY} | error +안녕${XXX:+${PWD}}xx | 안녕xx +안녕${PWD:+${XXX}}xx | 안녕xx +안녕${PWD:+${SHELL}}xx | 안녕bashxx +안녕${XXX:+000}xx | 안녕xx +안녕${PWD:+000}xx | 안녕000xx +'안녕${XX}' | 안녕${XX} +"안녕${PWD}" | 안녕/home +"안녕'$PWD'" | 안녕'/home' +'"안녕"' | "안녕" +안녕\$PWD | 안녕$PWD +"안녕\$PWD" | 안녕$PWD +'안녕\$PWD' | 안녕\$PWD +안녕${PWD | error +안녕${PWD:=000}xx | error +안녕${PWD:+${PWD}:}xx | 안녕/home:xx +안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +$KOREAN | 한국어 +안녕$KOREAN | 안녕한국어 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go new file mode 100644 index 00000000..270e3a4f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -0,0 +1,215 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. 
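The evaluator's package comment continues below. The envVarTest table above feeds the builder's shell-style word expander; a hedged sketch of exercising one row, assuming the ProcessWord(word string, env []string) (string, error) signature that dispatch() uses later in this file:

package dockerfile

import "fmt"

func exampleExpand() {
	out, err := ProcessWord("he${XXX:-$PWD}xx", []string{"PWD=/home"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // "he/homexx", matching the fixture row above
}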
+// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling NewBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile + +import ( + "fmt" + "runtime" + "strings" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. +var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error + +func init() { + evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ + command.Env: env, + command.Label: label, + command.Maintainer: maintainer, + command.Add: add, + command.Copy: dispatchCopy, // copy() is a go builtin + command.From: from, + command.Onbuild: onbuild, + command.Workdir: workdir, + command.Run: run, + command.Cmd: cmd, + command.Entrypoint: entrypoint, + command.Expose: expose, + command.Volume: volume, + command.User: user, + command.StopSignal: stopSignal, + command.Arg: arg, + } +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. 
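dispatch() itself follows. To make the two expansion tables above concrete: EXPOSE is allowed to fan a variable out into several words, while every other instruction keeps the expansion as one word. A sketch contrasting the two paths, with signatures as used inside dispatch():

package dockerfile

import "fmt"

func exampleWordExpansion() {
	env := []string{"foo=123 456"}

	words, _ := ProcessWords("$foo", env) // EXPOSE path: may split into words
	one, _ := ProcessWord("$foo", env)    // default path: stays a single word

	fmt.Println(words) // [123 456], so EXPOSE sees two ports
	fmt.Println(one)   // "123 456", so e.g. ENV keeps one value
}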
+func (b *Builder) dispatch(stepN int, ast *parser.Node) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + attrs := ast.Attributes + original := ast.Original + flags := ast.Flags + strList := []string{} + msg := fmt.Sprintf("Step %d : %s", stepN+1, upperCasedCmd) + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + if cmd == "onbuild" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + strList = append(strList, ast.Value) + msg += " " + ast.Value + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + } + + // count the number of nodes that we are going to traverse first + // so we can pre-create the argument and message array. This speeds up the + // allocation of those list a lot when they have a lot of arguments + cursor := ast + var n int + for cursor.Next != nil { + cursor = cursor.Next + n++ + } + msgList := make([]string, n) + + var i int + // Append the build-time args to config-environment. + // This allows builder config to override the variables, making the behavior similar to + // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build + // context. But `ENV foo $foo` will use the value from build context if one + // isn't already been defined by a previous ENV primitive. + // Note, we get this behavior because we know that ProcessWord() will + // stop on the first occurrence of a variable name and not notice + // a subsequent one. So, putting the buildArgs list after the Config.Env + // list, in 'envs', is safe. + envs := b.runConfig.Env + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + envs = append(envs, fmt.Sprintf("%s=%s", key, val)) + } + for ast.Next != nil { + ast = ast.Next + var str string + str = ast.Value + if replaceEnvAllowed[cmd] { + var err error + var words []string + + if allowWordExpansion[cmd] { + words, err = ProcessWords(str, envs) + if err != nil { + return err + } + strList = append(strList, words...) + } else { + str, err = ProcessWord(str, envs) + if err != nil { + return err + } + strList = append(strList, str) + } + } else { + strList = append(strList, str) + } + msgList[i] = ast.Value + i++ + } + + msg += " " + strings.Join(msgList, " ") + fmt.Fprintln(b.Stdout, msg) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + b.flags = NewBFlags() + b.flags.Args = flags + return f(b, strList, attrs, original) + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} + +// platformSupports is a short-term function to give users a quality error +// message if a Dockerfile uses a command not supported on the platform. 
+func platformSupports(command string) error { + if runtime.GOOS != "windows" { + return nil + } + switch command { + case "expose", "user", "stopsignal", "arg": + return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go new file mode 100644 index 00000000..a9be9fdd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -0,0 +1,662 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/strslice" +) + +func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { + if b.disableCommit { + return nil + } + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.runConfig.Image = b.image + + if id == "" { + cmd := b.runConfig.Cmd + if runtime.GOOS != "windows" { + b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", "#(nop) " + comment} + } else { + b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S /C", "REM (nop) " + comment} + } + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } else if hit { + return nil + } + id, err = b.create() + if err != nil { + return err + } + } + + // Note: Actually copy the struct + autoConfig := *b.runConfig + autoConfig.Cmd = autoCmd + + commitCfg := &types.ContainerCommitConfig{ + Author: b.maintainer, + Pause: true, + Config: &autoConfig, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + b.image = imageID + return nil +} + +type copyInfo struct { + builder.FileInfo + decompress bool +} + +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use %s", cmdName) + } + + if len(args) < 2 { + return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // Work in daemon-specific filepath semantics + dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + b.runConfig.Image = b.image + + var infos []copyInfo + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). 
Don't actually do + // the copy until we've looked at all src files + var err error + for _, orig := range args[0 : len(args)-1] { + var fi builder.FileInfo + decompress := allowLocalDecompression + if urlutil.IsURL(orig) { + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + fi, err = b.download(orig) + if err != nil { + return err + } + defer os.RemoveAll(filepath.Dir(fi.Path())) + decompress = false + infos = append(infos, copyInfo{fi, decompress}) + continue + } + // not a URL + subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) + if err != nil { + return err + } + + infos = append(infos, subInfos...) + } + + if len(infos) == 0 { + return fmt.Errorf("No source files were specified") + } + if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { + return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one info then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + var origPaths string + + if len(infos) == 1 { + fi := infos[0].FileInfo + origPaths = fi.Name() + if hfi, ok := fi.(builder.Hashed); ok { + srcHash = hfi.Hash() + } + } else { + var hashs []string + var origs []string + for _, info := range infos { + fi := info.FileInfo + origs = append(origs, fi.Name()) + if hfi, ok := fi.(builder.Hashed); ok { + hashs = append(hashs, hfi.Hash()) + } + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + origPaths = strings.Join(origs, " ") + } + + cmd := b.runConfig.Cmd + if runtime.GOOS != "windows" { + b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)} + } else { + b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest)} + } + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + if hit, err := b.probeCache(); err != nil { + return err + } else if hit { + return nil + } + + container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + + comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) + + // Twiddle the destination when its a relative path - meaning, make it + // relative to the WORKINGDIR + if !system.IsAbs(dest) { + hasSlash := strings.HasSuffix(dest, string(os.PathSeparator)) + dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest) + + // Make sure we preserve any trailing slash + if hasSlash { + dest += string(os.PathSeparator) + } + } + + for _, info := range infos { + if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { + return err + } + } + + return b.commit(container.ID, cmd, comment) +} + +func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { + // get filename from URL + u, err := url.Parse(srcURL) + if err != nil { + return + } + path := filepath.FromSlash(u.Path) // Ensure in platform semantics + if strings.HasSuffix(path, string(os.PathSeparator)) { + path = path[:len(path)-1] + } + parts := strings.Split(path, string(os.PathSeparator)) + filename := parts[len(parts)-1] + if filename == "" { + err = fmt.Errorf("cannot determine filename from url: %s", u) + return + } + + // Initiate the 
download + resp, err := httputils.Download(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) + progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + fmt.Fprintln(b.Stdout) + // ignoring error because the file was already opened successfully + tmpFileSt, err := tmpFile.Stat() + if err != nil { + return + } + tmpFile.Close() + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + // Calc the checksum, even if we're using the cache + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + if err != nil { + return + } + if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { + return + } + hash := tarSum.Sum(nil) + r.Close() + return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil +} + +func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { + + // Work in daemon-specific OS filepath semantics + origPath = filepath.FromSlash(origPath) + + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + var copyInfos []copyInfo + if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + if info.Name() == "" { + // Why are we doing this check? + return nil + } + if match, _ := filepath.Match(origPath, path); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) 
+ return nil + }); err != nil { + return nil, err + } + return copyInfos, nil + } + + // Must be a dir or a file + + statPath, fi, err := b.context.Stat(origPath) + if err != nil { + return nil, err + } + + copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} + + hfi, handleHash := fi.(builder.Hashed) + if !handleHash { + return copyInfos, nil + } + + // Deal with the single file case + if !fi.IsDir() { + hfi.SetHash("file:" + hfi.Hash()) + return copyInfos, nil + } + // Must be a dir + var subfiles []string + err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + // we already checked handleHash above + subfiles = append(subfiles, info.(builder.Hashed).Hash()) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) + + return copyInfos, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func (b *Builder) processImageFrom(img builder.Image) error { + if img != nil { + b.image = img.ImageID() + + if img.RunConfig() != nil { + b.runConfig = img.RunConfig() + } + } + + // Check to see if we have a default PATH, note that windows won't + // have one as its set by HCS + if system.DefaultPathEnv != "" { + // Convert the slice of strings that represent the current list + // of env vars into a map so we can see if PATH is already set. + // If its not set then go ahead and give it our default value + configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) + if _, ok := configEnv["PATH"]; !ok { + b.runConfig.Env = append(b.runConfig.Env, + "PATH="+system.DefaultPathEnv) + } + } + + if img == nil { + // Typically this means they used "FROM scratch" + return nil + } + + // Process ONBUILD triggers if they exist + if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := b.runConfig.OnBuild + b.runConfig.OnBuild = []string{} + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + ast, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for i, n := range ast.Children { + switch strings.ToUpper(n.Value) { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) + } + + if err := b.dispatch(i, n); err != nil { + return err + } + } + } + + return nil +} + +// probeCache checks if `b.docker` implements builder.ImageCache and image-caching +// is enabled (`b.UseCache`). +// If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`. +// If an image is found, probeCache returns `(true, nil)`. +// If no image is found, it returns `(false, nil)`. +// If there is any error, it returns `(false, err)`. 
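probeCache follows below. For the COPY/ADD cache keys above, several sources (or every file under a directory) are reduced to a single digest by hashing the comma-joined per-file hashes, with a "multi:" or "dir:" prefix. A sketch of the directory case:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
	"strings"
)

// dirHash reduces a set of per-file hashes to one order-independent digest,
// as calcCopyInfo does for a directory source.
func dirHash(fileHashes []string) string {
	sort.Strings(fileHashes)
	h := sha256.New()
	h.Write([]byte(strings.Join(fileHashes, ",")))
	return "dir:" + hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(dirHash([]string{"file:abc", "file:def"}))
}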
+func (b *Builder) probeCache() (bool, error) { + c, ok := b.docker.(builder.ImageCache) + if !ok || b.options.NoCache || b.cacheBusted { + return false, nil + } + cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig) + if err != nil { + return false, err + } + if len(cache) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) + b.cacheBusted = true + return false, nil + } + + fmt.Fprintf(b.Stdout, " ---> Using cache\n") + logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) + b.image = string(cache) + + return true, nil +} + +func (b *Builder) create() (string, error) { + if b.image == "" && !b.noBaseImage { + return "", fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.runConfig.Image = b.image + + resources := container.Resources{ + CgroupParent: b.options.CgroupParent, + CPUShares: b.options.CPUShares, + CPUPeriod: b.options.CPUPeriod, + CPUQuota: b.options.CPUQuota, + CpusetCpus: b.options.CPUSetCPUs, + CpusetMems: b.options.CPUSetMems, + Memory: b.options.Memory, + MemorySwap: b.options.MemorySwap, + Ulimits: b.options.Ulimits, + } + + // TODO: why not embed a hostconfig in builder? + hostConfig := &container.HostConfig{ + Isolation: b.options.Isolation, + ShmSize: b.options.ShmSize, + Resources: resources, + } + + config := *b.runConfig + + // Create the container + c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ + Config: b.runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return "", err + } + for _, warning := range c.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { + return "", err + } + + return c.ID, nil +} + +func (b *Builder) run(cID string) (err error) { + errCh := make(chan error) + go func() { + errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) + }() + + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-b.cancelled: + logrus.Debugln("Build cancelled, killing and removing container:", cID) + b.docker.ContainerKill(cID, 0) + b.removeContainer(cID) + case <-finished: + } + }() + + if err := b.docker.ContainerStart(cID, nil); err != nil { + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + return err + } + + if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + return &jsonmessage.JSONError{ + Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), + Code: ret, + } + } + + return nil +} + +func (b *Builder) removeContainer(c string) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := b.docker.ContainerRm(c, rmConfig); err != nil { + fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) + return err + } + return nil +} + +func (b *Builder) clearTmp() { + for c := range b.tmpContainers { + if err := b.removeContainer(c); err != nil { + return + } + delete(b.tmpContainers, c) + fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) + } +} + +// readDockerfile reads a Dockerfile from the current 
context. +func (b *Builder) readDockerfile() error { + // If no -f was specified then look for 'Dockerfile'. If we can't find + // that then look for 'dockerfile'. If neither are found then default + // back to 'Dockerfile' and use that in the error message. + if b.options.Dockerfile == "" { + b.options.Dockerfile = builder.DefaultDockerfileName + if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { + lowercase := strings.ToLower(b.options.Dockerfile) + if _, _, err := b.context.Stat(lowercase); err == nil { + b.options.Dockerfile = lowercase + } + } + } + + f, err := b.context.Open(b.options.Dockerfile) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) + } + return err + } + if f, ok := f.(*os.File); ok { + // ignoring error because Open already succeeded + fi, err := f.Stat() + if err != nil { + return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) + } + if fi.Size() == 0 { + return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) + } + } + b.dockerfile, err = parser.Parse(f) + f.Close() + if err != nil { + return err + } + + // After the Dockerfile has been parsed, we need to check the .dockerignore + // file for either "Dockerfile" or ".dockerignore", and if either are + // present then erase them from the build context. These files should never + // have been sent from the client but we did send them to make sure that + // we had the Dockerfile to actually parse, and then we also need the + // .dockerignore file to know whether either file should be removed. + // Note that this assumes the Dockerfile has been read into memory and + // is now safe to be removed. + if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { + dockerIgnore.Process([]string{b.options.Dockerfile}) + } + return nil +} + +// determine if build arg is part of built-in args or user +// defined args in Dockerfile at any point in time. +func (b *Builder) isBuildArgAllowed(arg string) bool { + if _, ok := BuiltinAllowedBuildArgs[arg]; ok { + return true + } + if _, ok := b.allowedBuildArgs[arg]; ok { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 00000000..1d7ece43 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,331 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "unicode" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. 
+// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + _, child, err := ParseLine(rest) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). +func parseWords(rest string) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(rest); pos++ { + if pos != len(rest) { + ch = rune(rest[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == '\\' { + if pos+1 == len(rest) { + continue // just skip \ at end + } + // If we're not quoted and we see a \, then always just + // add \ plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos++ + ch = rune(rest[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // \ is special except for ' quotes - can't escape anything for ' + if ch == '\\' && quote != '\'' { + if pos+1 == len(rest) { + phase = inWord + continue // just skip \ at end + } + pos++ + nextCh := rune(rest[pos]) + word += string(ch) + ch = nextCh + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest) + if len(words) == 0 { + return nil, nil, nil + } + + var rootnode *Node + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := tokenWhitespace.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf(key + " must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) + } + parts := strings.SplitN(word, "=", 2) + + name := &Node{} + value := &Node{} + + name.Next = value + name.Value = parts[0] + value.Value = parts[1] + + if i == 0 { + rootnode = name + } else { + prevNode.Next = name + } + prevNode = value + } + } + + return rootnode, nil, nil +} + +func parseEnv(rest string) (*Node, map[string]bool, error) { + return parseNameVal(rest, "ENV") +} + +func parseLabel(rest string) (*Node, map[string]bool, error) { + return parseNameVal(rest, "LABEL") +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. +// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string) (*Node, map[string]bool, error) { + words := parseWords(rest) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parsestring just wraps the string in quotes and returns a working node. +func parseString(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string) (*Node, map[string]bool, error) { + rest = strings.TrimLeftFunc(rest, unicode.IsSpace) + if !strings.HasPrefix(rest, "[") { + return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + } + + var myJSON []interface{} + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + return nil, nil, err + } + + var top, prev *Node + for _, str := range myJSON { + s, ok := str.(string) + if !ok { + return nil, nil, errDockerfileNotStringArray + } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node + } + + return top, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. 
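+//
+// Editor's illustration (assumed Dockerfile lines, not upstream examples):
+//
+//   RUN ["sh", "-c", "echo hi"]   -> JSON form, attributes["json"] is true
+//   RUN echo hi                   -> plain form, one node holding "echo hi"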
+func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node, attrs, err := parseJSON(rest) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. +func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) { + node, attrs, err := parseJSON(rest) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go new file mode 100644 index 00000000..ece601a9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go @@ -0,0 +1,161 @@ +// Package parser implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "io" + "regexp" + "strings" + "unicode" + + "github.com/docker/docker/builder/dockerfile/command" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. +// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing + Flags []string // only top Node should have this set + StartLine int // the line in the original dockerfile where the node begins + EndLine int // the line in the original dockerfile where the node ends +} + +var ( + dispatch map[string]func(string) (*Node, map[string]bool, error) + tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + tokenLineContinuation = regexp.MustCompile(`\\[ \t]*$`) + tokenComment = regexp.MustCompile(`^#.*$`) +) + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. 
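+	// Editor's illustration of a single dispatch, derived from the table
+	// below: `EXPOSE 80 443` routes to parseStringsWhitespaceDelimited,
+	// which hangs the linked list (80) -> (443) off the EXPOSE node.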
+ dispatch = map[string]func(string) (*Node, map[string]bool, error){ + command.User: parseString, + command.Onbuild: parseSubCommand, + command.Workdir: parseString, + command.Env: parseEnv, + command.Label: parseLabel, + command.Maintainer: parseString, + command.From: parseString, + command.Add: parseMaybeJSONToList, + command.Copy: parseMaybeJSONToList, + command.Run: parseMaybeJSON, + command.Cmd: parseMaybeJSON, + command.Entrypoint: parseMaybeJSON, + command.Expose: parseStringsWhitespaceDelimited, + command.Volume: parseMaybeJSONToList, + command.StopSignal: parseString, + command.Arg: parseNameOrNameVal, + } +} + +// ParseLine parse a line and return the remainder. +func ParseLine(line string) (string, *Node, error) { + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if tokenLineContinuation.MatchString(line) { + line = tokenLineContinuation.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, flags, args, err := splitCommand(line) + if err != nil { + return "", nil, err + } + + node := &Node{} + node.Value = cmd + + sexp, attrs, err := fullDispatch(cmd, args) + if err != nil { + return "", nil, err + } + + node.Next = sexp + node.Attributes = attrs + node.Original = line + node.Flags = flags + + return "", node, nil +} + +// Parse is the main parse routine. +// It handles an io.ReadWriteCloser and returns the root of the AST. +func Parse(rwc io.Reader) (*Node, error) { + currentLine := 0 + root := &Node{} + root.StartLine = -1 + scanner := bufio.NewScanner(rwc) + + for scanner.Scan() { + scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) + currentLine++ + line, child, err := ParseLine(scannedLine) + if err != nil { + return nil, err + } + startLine := currentLine + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + currentLine++ + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = ParseLine(line + newline) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + if child == nil && line != "" { + _, child, err = ParseLine(line) + if err != nil { + return nil, err + } + } + } + + if child != nil { + // Update the line information for the current child. + child.StartLine = startLine + child.EndLine = currentLine + // Update the line information for the root. The starting line of the root is always the + // starting line of the first child and the ending line is the ending line of the last child. + if root.StartLine < 0 { + root.StartLine = currentLine + } + root.EndLine = currentLine + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go new file mode 100644 index 00000000..b21eb62a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go @@ -0,0 +1,176 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. 
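+//
+// Editor's illustration (not upstream documentation): the Dockerfile line
+// `ENV foo=bar` dumps as
+//
+//   (env "foo" "bar")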
+func (node *Node) Dump() string { + str := "" + str += node.Value + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. +func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +// covers comments and empty lines. Lines should be trimmed before passing to +// this function. +func stripComments(line string) string { + // string is already trimmed at this point + if tokenComment.MatchString(line) { + return tokenComment.ReplaceAllString(line, "") + } + + return line +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found someting with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff 
--git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go new file mode 100644 index 00000000..c7142667 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go @@ -0,0 +1,314 @@ +package dockerfile + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "strings" + "text/scanner" + "unicode" +) + +type shellWord struct { + word string + scanner scanner.Scanner + envs []string + pos int +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. +func ProcessWord(word string, env []string) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + sw.scanner.Init(strings.NewReader(word)) + word, _, err := sw.process() + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word' then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces - taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted", but ProcessWord retains spaces between words. +func ProcessWords(word string, env []string) ([]string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + sw.scanner.Init(strings.NewReader(word)) + _, words, err := sw.process() + return words, err +} + +func (sw *shellWord) process() (string, []string, error) { + return sw.processStopOn(scanner.EOF) +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result string + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } 
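+			// Editor's note on the branch below: text produced by $ expansion
+			// is re-scanned for word breaks (addString), while quoted or
+			// literal text is appended verbatim (addRawString), mirroring how
+			// a shell field-splits unquoted expansions but not quoted ones.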
+ result += tmp + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == '\\' { + // '\' escapes, except end of line + + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result += string(ch) + } + } + + return result, words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + if ch == '\'' || ch == scanner.EOF { + break + } + result += string(ch) + } + + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ + var result string + + sw.scanner.Next() + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if ch == '"' { + sw.scanner.Next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.scanner.Next() + if ch == '\\' { + chNext := sw.scanner.Peek() + + if chNext == scanner.EOF { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.scanner.Next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + ch := sw.scanner.Peek() + if ch == '{' { + sw.scanner.Next() + name := sw.processName() + ch = sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... 
spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + if name != env[:i] { + continue + } + return env[i+1:] + } + return "" +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support.go b/vendor/github.com/docker/docker/builder/dockerfile/support.go new file mode 100644 index 00000000..38897b2c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/support.go @@ -0,0 +1,16 @@ +package dockerfile + +import "strings" + +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/wordsTest b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest new file mode 100644 index 00000000..fa916c67 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest @@ -0,0 +1,25 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! 
+hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | hello there +hello\" there | hello",there diff --git a/vendor/github.com/docker/docker/builder/tarsum.go b/vendor/github.com/docker/docker/builder/tarsum.go index 48372cb0..201e3ef7 100644 --- a/vendor/github.com/docker/docker/builder/tarsum.go +++ b/vendor/github.com/docker/docker/builder/tarsum.go @@ -138,7 +138,7 @@ func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { } sum := rel - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + if tsInfo := c.sums.GetFile(rel); tsInfo != nil { sum = tsInfo.Sum() } fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} diff --git a/vendor/github.com/docker/docker/cli/cli.go b/vendor/github.com/docker/docker/cli/cli.go new file mode 100644 index 00000000..8e559fc3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/cli.go @@ -0,0 +1,200 @@ +package cli + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" + "strings" + + flag "github.com/docker/docker/pkg/mflag" +) + +// Cli represents a command line interface. +type Cli struct { + Stderr io.Writer + handlers []Handler + Usage func() +} + +// Handler holds the different commands Cli will call +// It should have methods with names starting with `Cmd` like: +// func (h myHandler) CmdFoo(args ...string) error +type Handler interface{} + +// Initializer can be optionally implemented by a Handler to +// initialize before each call to one of its commands. +type Initializer interface { + Initialize() error +} + +// New instantiates a ready-to-use Cli. +func New(handlers ...Handler) *Cli { + // make the generic Cli object the first cli handler + // in order to handle `docker help` appropriately + cli := new(Cli) + cli.handlers = append([]Handler{cli}, handlers...) + return cli +} + +// initErr is an error returned upon initialization of a handler implementing Initializer. +type initErr struct{ error } + +func (err initErr) Error() string { + return err.Error() +} + +func (cli *Cli) command(args ...string) (func(...string) error, error) { + for _, c := range cli.handlers { + if c == nil { + continue + } + camelArgs := make([]string, len(args)) + for i, s := range args { + if len(s) == 0 { + return nil, errors.New("empty command") + } + camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) + } + methodName := "Cmd" + strings.Join(camelArgs, "") + method := reflect.ValueOf(c).MethodByName(methodName) + if method.IsValid() { + if c, ok := c.(Initializer); ok { + if err := c.Initialize(); err != nil { + return nil, initErr{err} + } + } + return method.Interface().(func(...string) error), nil + } + } + return nil, errors.New("command not found") +} + +// Run executes the specified command. +func (cli *Cli) Run(args ...string) error { + if len(args) > 1 { + command, err := cli.command(args[:2]...) + switch err := err.(type) { + case nil: + return command(args[2:]...) + case initErr: + return err.error + } + } + if len(args) > 0 { + command, err := cli.command(args[0]) + switch err := err.(type) { + case nil: + return command(args[1:]...) + case initErr: + return err.error + } + cli.noSuchCommand(args[0]) + } + return cli.CmdHelp() +} + +func (cli *Cli) noSuchCommand(command string) { + if cli.Stderr == nil { + cli.Stderr = os.Stderr + } + fmt.Fprintf(cli.Stderr, "docker: '%s' is not a docker command.\nSee 'docker --help'.\n", command) + os.Exit(1) +} + +// CmdHelp displays information on a Docker command. 
+// +// If more than one command is specified, information is only shown for the first command. +// +// Usage: docker help COMMAND or docker COMMAND --help +func (cli *Cli) CmdHelp(args ...string) error { + if len(args) > 1 { + command, err := cli.command(args[:2]...) + switch err := err.(type) { + case nil: + command("--help") + return nil + case initErr: + return err.error + } + } + if len(args) > 0 { + command, err := cli.command(args[0]) + switch err := err.(type) { + case nil: + command("--help") + return nil + case initErr: + return err.error + } + cli.noSuchCommand(args[0]) + } + + if cli.Usage == nil { + flag.Usage() + } else { + cli.Usage() + } + + return nil +} + +// Subcmd is a subcommand of the main "docker" command. +// A subcommand represents an action that can be performed +// from the Docker command line client. +// +// To see all available subcommands, run "docker --help". +func Subcmd(name string, synopses []string, description string, exitOnError bool) *flag.FlagSet { + var errorHandling flag.ErrorHandling + if exitOnError { + errorHandling = flag.ExitOnError + } else { + errorHandling = flag.ContinueOnError + } + flags := flag.NewFlagSet(name, errorHandling) + flags.Usage = func() { + flags.ShortUsage() + flags.PrintDefaults() + } + + flags.ShortUsage = func() { + options := "" + if flags.FlagCountUndeprecated() > 0 { + options = " [OPTIONS]" + } + + if len(synopses) == 0 { + synopses = []string{""} + } + + // Allow for multiple command usage synopses. + for i, synopsis := range synopses { + lead := "\t" + if i == 0 { + // First line needs the word 'Usage'. + lead = "Usage:\t" + } + + if synopsis != "" { + synopsis = " " + synopsis + } + + fmt.Fprintf(flags.Out(), "\n%sdocker %s%s%s", lead, name, options, synopsis) + } + + fmt.Fprintf(flags.Out(), "\n\n%s\n", description) + } + + return flags +} + +// An StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/docker/docker/cli/client.go b/vendor/github.com/docker/docker/cli/client.go new file mode 100644 index 00000000..6a82eb52 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/client.go @@ -0,0 +1,12 @@ +package cli + +import flag "github.com/docker/docker/pkg/mflag" + +// ClientFlags represents flags for the docker client. +type ClientFlags struct { + FlagSet *flag.FlagSet + Common *CommonFlags + PostParse func() + + ConfigDir string +} diff --git a/vendor/github.com/docker/docker/cli/common.go b/vendor/github.com/docker/docker/cli/common.go new file mode 100644 index 00000000..7f6a24ba --- /dev/null +++ b/vendor/github.com/docker/docker/cli/common.go @@ -0,0 +1,80 @@ +package cli + +import ( + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/tlsconfig" +) + +// CommonFlags represents flags that are common to both the client and the daemon. 
+type CommonFlags struct { + FlagSet *flag.FlagSet + PostParse func() + + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options + TrustKey string +} + +// Command is the struct containing the command name and description +type Command struct { + Name string + Description string +} + +var dockerCommands = []Command{ + {"attach", "Attach to a running container"}, + {"build", "Build an image from a Dockerfile"}, + {"commit", "Create a new image from a container's changes"}, + {"cp", "Copy files/folders between a container and the local filesystem"}, + {"create", "Create a new container"}, + {"diff", "Inspect changes on a container's filesystem"}, + {"events", "Get real time events from the server"}, + {"exec", "Run a command in a running container"}, + {"export", "Export a container's filesystem as a tar archive"}, + {"history", "Show the history of an image"}, + {"images", "List images"}, + {"import", "Import the contents from a tarball to create a filesystem image"}, + {"info", "Display system-wide information"}, + {"inspect", "Return low-level information on a container or image"}, + {"kill", "Kill a running container"}, + {"load", "Load an image from a tar archive or STDIN"}, + {"login", "Log in to a Docker registry"}, + {"logout", "Log out from a Docker registry"}, + {"logs", "Fetch the logs of a container"}, + {"network", "Manage Docker networks"}, + {"pause", "Pause all processes within a container"}, + {"port", "List port mappings or a specific mapping for the CONTAINER"}, + {"ps", "List containers"}, + {"pull", "Pull an image or a repository from a registry"}, + {"push", "Push an image or a repository to a registry"}, + {"rename", "Rename a container"}, + {"restart", "Restart a container"}, + {"rm", "Remove one or more containers"}, + {"rmi", "Remove one or more images"}, + {"run", "Run a command in a new container"}, + {"save", "Save one or more images to a tar archive"}, + {"search", "Search the Docker Hub for images"}, + {"start", "Start one or more stopped containers"}, + {"stats", "Display a live stream of container(s) resource usage statistics"}, + {"stop", "Stop a running container"}, + {"tag", "Tag an image into a repository"}, + {"top", "Display the running processes of a container"}, + {"unpause", "Unpause all processes within a container"}, + {"update", "Update configuration of one or more containers"}, + {"version", "Show the Docker version information"}, + {"volume", "Manage Docker volumes"}, + {"wait", "Block until a container stops, then print its exit code"}, +} + +// DockerCommands stores all the docker command +var DockerCommands = make(map[string]Command) + +func init() { + for _, cmd := range dockerCommands { + DockerCommands[cmd.Name] = cmd + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/config.go b/vendor/github.com/docker/docker/cliconfig/config.go index a5a13039..54e0ea38 100644 --- a/vendor/github.com/docker/docker/cliconfig/config.go +++ b/vendor/github.com/docker/docker/cliconfig/config.go @@ -1,12 +1,15 @@ package cliconfig import ( + "encoding/base64" + "encoding/json" "fmt" "io" + "io/ioutil" "os" "path/filepath" + "strings" - "github.com/docker/docker/cliconfig/configfile" "github.com/docker/docker/pkg/homedir" "github.com/docker/engine-api/types" ) @@ -43,19 +46,94 @@ func SetConfigDir(dir string) { configDir = dir } +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string 
`json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + filename string // Note: not serialized - for internal use only +} + // NewConfigFile initializes an empty configuration file for the given filename 'fn' -func NewConfigFile(fn string) *configfile.ConfigFile { - return &configfile.ConfigFile{ +func NewConfigFile(fn string) *ConfigFile { + return &ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), HTTPHeaders: make(map[string]string), - Filename: fn, + filename: fn, } } +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return fmt.Errorf("The Auth config file is empty") + } + authConfig := types.AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return err + } + authConfig.ServerAddress = defaultIndexserver + configFile.AuthConfigs[defaultIndexserver] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(&configFile); err != nil { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return nil +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. 
+func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + (configFile.AuthConfigs != nil && len(configFile.AuthConfigs) > 0) +} + // LegacyLoadFromReader is a convenience function that creates a ConfigFile object from // a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ +func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) { + configFile := ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), } err := configFile.LegacyLoadFromReader(configData) @@ -64,8 +142,8 @@ func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) // LoadFromReader is a convenience function that creates a ConfigFile object from // a reader -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ +func LoadFromReader(configData io.Reader) (*ConfigFile, error) { + configFile := ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), } err := configFile.LoadFromReader(configData) @@ -73,34 +151,34 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { } // Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. +// the auth config information and return values. // FIXME: use the internal golang config parser -func Load(configDir string) (*configfile.ConfigFile, error) { +func Load(configDir string) (*ConfigFile, error) { if configDir == "" { configDir = ConfigDir() } - configFile := configfile.ConfigFile{ + configFile := ConfigFile{ AuthConfigs: make(map[string]types.AuthConfig), - Filename: filepath.Join(configDir, ConfigFileName), + filename: filepath.Join(configDir, ConfigFileName), } // Try happy path first - latest config file - if _, err := os.Stat(configFile.Filename); err == nil { - file, err := os.Open(configFile.Filename) + if _, err := os.Stat(configFile.filename); err == nil { + file, err := os.Open(configFile.filename) if err != nil { - return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) + return &configFile, fmt.Errorf("%s - %v", configFile.filename, err) } defer file.Close() err = configFile.LoadFromReader(file) if err != nil { - err = fmt.Errorf("%s - %v", configFile.Filename, err) + err = fmt.Errorf("%s - %v", configFile.filename, err) } return &configFile, err } else if !os.IsNotExist(err) { // if file is there but we can't stat it for any reason other // than it doesn't exist then stop - return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) + return &configFile, fmt.Errorf("%s - %v", configFile.filename, err) } // Can't find latest config file so check for the old one @@ -123,3 +201,89 @@ func Load(configDir string) (*configfile.ConfigFile, error) { } return &configFile, nil } + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + 
configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() error { + if configFile.Filename() == "" { + return fmt.Errorf("Can't save config with empty filename") + } + + if err := os.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil { + return err + } + f, err := os.OpenFile(configFile.filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + return configFile.SaveToWriter(f) +} + +// Filename returns the name of the configuration file +func (configFile *ConfigFile) Filename() string { + return configFile.filename +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} diff --git a/vendor/github.com/docker/docker/cliconfig/configfile/file.go b/vendor/github.com/docker/docker/cliconfig/configfile/file.go deleted file mode 100644 index 7c94e27d..00000000 --- a/vendor/github.com/docker/docker/cliconfig/configfile/file.go +++ /dev/null @@ -1,177 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/engine-api/types" -) - -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexserver = "https://index.docker.io/v1/" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - Filename string `json:"-"` // Note: for internal use only -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return fmt.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexserver - configFile.AuthConfigs[defaultIndexserver] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return nil -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. 
-func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - (configFile.AuthConfigs != nil && len(configFile.AuthConfigs) > 0) -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() error { - if configFile.Filename == "" { - return fmt.Errorf("Can't save config with empty filename") - } - - if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil { - return err - } - f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - return configFile.SaveToWriter(f) -} - -// encodeAuth creates a base64 encoded string to containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", fmt.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", fmt.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go new file mode 100644 index 00000000..510cf8cf --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/engine-api/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. 
+ GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go new file mode 100644 index 00000000..3cdc8c38 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go @@ -0,0 +1,22 @@ +package credentials + +import ( + "github.com/docker/containerd/subreaper/exec" + + "github.com/docker/docker/cliconfig" +) + +// DetectDefaultStore sets the default credentials store +// if the host includes the default store helper program. +func DetectDefaultStore(c *cliconfig.ConfigFile) { + if c.CredentialsStore != "" { + // user defined + return + } + + if defaultCredentialsStore != "" { + if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { + c.CredentialsStore = defaultCredentialsStore + } + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go new file mode 100644 index 00000000..63e8ed40 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "osxkeychain" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go new file mode 100644 index 00000000..864c540f --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "secretservice" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go new file mode 100644 index 00000000..519ef53d --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go @@ -0,0 +1,5 @@ +// +build !windows,!darwin,!linux + +package credentials + +const defaultCredentialsStore = "" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go new file mode 100644 index 00000000..8e7edd62 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go @@ -0,0 +1,67 @@ +package credentials + +import ( + "strings" + + "github.com/docker/docker/cliconfig" + "github.com/docker/engine-api/types" +) + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file *cliconfig.ConfigFile +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file *cliconfig.ConfigFile) Store { + return &fileStore{ + file: file, + } +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.AuthConfigs, serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. 
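+// Editor's example of the legacy fallback below: a lookup for
+// "index.docker.io" also matches an entry stored under the legacy key
+// "https://index.docker.io/v1/", since both reduce to the same hostname.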
+func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
+	authConfig, ok := c.file.AuthConfigs[serverAddress]
+	if !ok {
+		// Maybe they have a legacy config file, so we iterate the keys,
+		// converting them to the new format and testing each one
+		for registry, ac := range c.file.AuthConfigs {
+			if serverAddress == convertToHostname(registry) {
+				return ac, nil
+			}
+		}
+
+		authConfig = types.AuthConfig{}
+	}
+	return authConfig, nil
+}
+
+func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
+	return c.file.AuthConfigs, nil
+}
+
+// Store saves the given credentials in the file store.
+func (c *fileStore) Store(authConfig types.AuthConfig) error {
+	c.file.AuthConfigs[authConfig.ServerAddress] = authConfig
+	return c.file.Save()
+}
+
+func convertToHostname(url string) string {
+	stripped := url
+	if strings.HasPrefix(url, "http://") {
+		stripped = strings.Replace(url, "http://", "", 1)
+	} else if strings.HasPrefix(url, "https://") {
+		stripped = strings.Replace(url, "https://", "", 1)
+	}
+
+	nameParts := strings.SplitN(stripped, "/", 2)
+
+	return nameParts[0]
+}
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go
new file mode 100644
index 00000000..9b8997dd
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go
@@ -0,0 +1,196 @@
+package credentials
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/cliconfig"
+	"github.com/docker/engine-api/types"
+)
+
+const (
+	remoteCredentialsPrefix = "docker-credential-"
+	tokenUsername           = "<token>"
+)
+
+// Standardize the not-found error, so every helper returns
+// the same message and docker can handle it properly.
+var errCredentialsNotFound = errors.New("credentials not found in native keychain")
+
+// command is an interface that remote executed commands implement.
+type command interface {
+	Output() ([]byte, error)
+	Input(in io.Reader)
+}
+
+// credentialsRequest holds information shared between docker and a remote credential store.
+type credentialsRequest struct {
+	ServerURL string
+	Username  string
+	Secret    string
+}
+
+// credentialsGetResponse is the information serialized from a remote store
+// when the plugin sends requests to get the user credentials.
+type credentialsGetResponse struct {
+	Username string
+	Secret   string
+}
+
+// nativeStore implements a credentials store
+// using native keychain to keep credentials secure.
+// It piggybacks into a file store to keep users' emails.
+type nativeStore struct {
+	commandFn func(args ...string) command
+	fileStore Store
+}
+
+// NewNativeStore creates a new native store that
+// uses a remote helper program to manage credentials.
+func NewNativeStore(file *cliconfig.ConfigFile) Store {
+	return &nativeStore{
+		commandFn: shellCommandFn(file.CredentialsStore),
+		fileStore: NewFileStore(file),
+	}
+}
+
+// Erase removes the given credentials from the native store.
+func (c *nativeStore) Erase(serverAddress string) error {
+	if err := c.eraseCredentialsFromStore(serverAddress); err != nil {
+		return err
+	}
+
+	// Fallback to plain text store to remove email
+	return c.fileStore.Erase(serverAddress)
+}
+
+// Get retrieves credentials for a specific server from the native store.
+func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) {
+	// load user email if it exists, or an empty auth config.
+	auth, _ := c.fileStore.Get(serverAddress)
+
+	creds, err := c.getCredentialsFromStore(serverAddress)
+	if err != nil {
+		return auth, err
+	}
+	auth.Username = creds.Username
+	auth.IdentityToken = creds.IdentityToken
+	auth.Password = creds.Password
+
+	return auth, nil
+}
+
+// GetAll retrieves all the credentials from the native store.
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
+	auths, _ := c.fileStore.GetAll()
+
+	for s, ac := range auths {
+		creds, _ := c.getCredentialsFromStore(s)
+		ac.Username = creds.Username
+		ac.Password = creds.Password
+		ac.IdentityToken = creds.IdentityToken
+		auths[s] = ac
+	}
+
+	return auths, nil
+}
+
+// Store saves the given credentials in the file store.
+func (c *nativeStore) Store(authConfig types.AuthConfig) error {
+	if err := c.storeCredentialsInStore(authConfig); err != nil {
+		return err
+	}
+	authConfig.Username = ""
+	authConfig.Password = ""
+	authConfig.IdentityToken = ""
+
+	// Fallback to old credential in plain text to save only the email
+	return c.fileStore.Store(authConfig)
+}
+
+// storeCredentialsInStore executes the command to store the credentials in the native store.
+func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
+	cmd := c.commandFn("store")
+	creds := &credentialsRequest{
+		ServerURL: config.ServerAddress,
+		Username:  config.Username,
+		Secret:    config.Password,
+	}
+
+	if config.IdentityToken != "" {
+		creds.Username = tokenUsername
+		creds.Secret = config.IdentityToken
+	}
+
+	buffer := new(bytes.Buffer)
+	if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+		return err
+	}
+	cmd.Input(buffer)
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+		logrus.Debugf("error adding credentials - err: %v, out: `%s`", err, t)
+		return fmt.Errorf(t)
+	}
+
+	return nil
+}
+
+// getCredentialsFromStore executes the command to get the credentials from the native store.
+func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
+	var ret types.AuthConfig
+
+	cmd := c.commandFn("get")
+	cmd.Input(strings.NewReader(serverAddress))
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		// do not return an error if the credentials are not
+		// in the keychain. Let docker ask for new credentials.
+		if t == errCredentialsNotFound.Error() {
+			return ret, nil
+		}
+
+		logrus.Debugf("error getting credentials - err: %v, out: `%s`", err, t)
+		return ret, fmt.Errorf(t)
+	}
+
+	var resp credentialsGetResponse
+	if err := json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
+		return ret, err
+	}
+
+	if resp.Username == tokenUsername {
+		ret.IdentityToken = resp.Secret
+	} else {
+		ret.Password = resp.Secret
+		ret.Username = resp.Username
+	}
+
+	ret.ServerAddress = serverAddress
+	return ret, nil
+}
+
+// eraseCredentialsFromStore executes the command to remove the server credentials from the native store.
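The native store above speaks a tiny line protocol to an external `docker-credential-<name>` binary: the action ("get", "store", "erase") is the argument, the payload travels over stdin, and the answer comes back on stdout. The vendored code routes this through the subreaper-aware exec fork pinned in trash.conf; the standalone sketch below uses the standard library `os/exec` instead, and the `docker-credential-secretservice` helper name is an assumption for illustration:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// "get" receives the server URL on stdin and answers with a JSON
	// {Username, Secret} document; a missing entry is reported on stdout
	// as the exact string "credentials not found in native keychain".
	cmd := exec.Command("docker-credential-secretservice", "get") // helper name assumed
	cmd.Stdin = strings.NewReader("https://index.docker.io/v1/")

	out, err := cmd.Output()
	if err != nil {
		fmt.Println("helper said:", strings.TrimSpace(string(out)))
		return
	}

	var resp struct{ Username, Secret string }
	if err := json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("user:", resp.Username)
}
```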
+func (c *nativeStore) eraseCredentialsFromStore(serverURL string) error { + cmd := c.commandFn("erase") + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + logrus.Debugf("error erasing credentials - err: %v, out: `%s`", err, t) + return fmt.Errorf(t) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go b/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go new file mode 100644 index 00000000..b2afde63 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go @@ -0,0 +1,28 @@ +package credentials + +import ( + "io" + "github.com/docker/containerd/subreaper/exec" +) + +func shellCommandFn(storeName string) func(args ...string) command { + name := remoteCredentialsPrefix + storeName + return func(args ...string) command { + return &shell{cmd: exec.Command(name, args...)} + } +} + +// shell invokes shell commands to talk with a remote credentials helper. +type shell struct { + cmd *exec.Cmd +} + +// Output returns responses from the remote credentials helper. +func (s *shell) Output() ([]byte, error) { + return s.cmd.Output() +} + +// Input sets the input to send to a remote credentials helper. +func (s *shell) Input(in io.Reader) { + s.cmd.Stdin = in +} diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go new file mode 100644 index 00000000..95b68285 --- /dev/null +++ b/vendor/github.com/docker/docker/container/archive.go @@ -0,0 +1,69 @@ +package container + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/engine-api/types" +) + +// ResolvePath resolves the given path in the container to a resource on the +// host. Returns a resolved path (absolute path to the resource on the host), +// the absolute path to the resource relative to the container's rootfs, and +// a error if the path points to outside the container's rootfs. +func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { + // Consider the given path as an absolute path in the container. + absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // Split the absPath into its Directory and Base components. We will + // resolve the dir in the scope of the container then append the base. + dirPath, basePath := filepath.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) + if err != nil { + return "", "", err + } + + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + + return resolvedPath, absPath, nil +} + +// StatPath is the unexported version of StatPath. Locks and mounts should +// be acquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. +func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + lstat, err := os.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. 
+ hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go new file mode 100644 index 00000000..9bdd112d --- /dev/null +++ b/vendor/github.com/docker/docker/container/container.go @@ -0,0 +1,649 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +const configFileName = "config.v2.json" + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// CommonContainer holds the fields for a container which are +// applicable across all platforms supported by the daemon. +type CommonContainer struct { + *runconfig.StreamConfig + // embed for Container to support states directly. + *State `json:"State"` // Needed for remote api version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. + BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext +} + +// NewBaseContainer creates a new container with its +// basic configuration. 
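The `ResolvePath` implementation above rests on one trick: resolve only the directory part inside the scope, then re-append the base element, so a symlink in the final path component is reported rather than followed. A minimal sketch of that shape using only `path/filepath` (the scope path and the `GetResourcePath` stand-in are illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// scopedResolve imitates ResolvePath: split off the base element,
// resolve the directory within the scope, then re-attach the base so
// a trailing symlink is not evaluated.
func scopedResolve(scope, path string) string {
	abs := filepath.Join(string(filepath.Separator), path)
	dir, base := filepath.Split(abs)
	resolvedDir := filepath.Join(scope, dir) // stand-in for GetResourcePath
	return resolvedDir + string(filepath.Separator) + base
}

func main() {
	fmt.Println(scopedResolve("/var/lib/docker/rootfs", "foo/bar"))
	// /var/lib/docker/rootfs/foo/bar
}
```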
+func NewBaseContainer(id, root string) *Container { + return &Container{ + CommonContainer: CommonContainer{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: runconfig.NewStreamConfig(), + attachContext: &attachContext{}, + }, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +// ToDisk saves the container configuration on disk. +func (container *Container) ToDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Create(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + enc := json.NewEncoder(jsonSource) + + // Save container settings + if err := enc.Encode(container); err != nil { + return err + } + + return container.WriteHostConfig() +} + +// ToDiskLocking saves the container configuration on disk in a thread safe way. +func (container *Container) ToDiskLocking() error { + container.Lock() + err := container.ToDisk() + container.Unlock() + return err +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container. +func (container *Container) WriteHostConfig() error { + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Create(pth) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(&container.HostConfig) +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { + if container.Config.WorkingDir == "" { + return nil + } + + // If can't mount container FS at this point (eg Hyper-V Containers on + // Windows) bail out now with no action. 
+ if !container.canMountFS() { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + if container.restartManager != nil { + container.restartManager.Cancel() + } +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// StartLogger starts a new logger driver for the container. 
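The `FromDisk`/`ToDisk` pair above is plain stream-oriented JSON persistence: the container's metadata lives as one `config.v2.json` document under its `Root` directory, encoded and decoded directly against the open file. A self-contained sketch of the same round trip, with an illustrative `meta` struct standing in for the real container type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// meta is an illustrative stand-in for the container struct.
type meta struct {
	ID   string
	Name string
}

func main() {
	root, err := ioutil.TempDir("", "ctr")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(root)
	pth := filepath.Join(root, "config.v2.json")

	// ToDisk: stream-encode straight into the file.
	f, err := os.Create(pth)
	if err != nil {
		fmt.Println(err)
		return
	}
	json.NewEncoder(f).Encode(meta{ID: "abc123", Name: "/power"})
	f.Close()

	// FromDisk: decode it back.
	var m meta
	f, _ = os.Open(pth)
	json.NewDecoder(f).Decode(&m)
	f.Close()
	fmt.Println(m.ID, m.Name)
}
```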
+func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { + c, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("Failed to get logging factory: %v", err) + } + ctx := logger.Context{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + return c(ctx) +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + if container.HostConfig.Privileged { + return "" + } + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// Attach connects to the container's TTY, delegating to standard +// streams or websockets depending on the configuration. +func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + ctx := container.InitAttachContext() + return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) +} + +// AttachStreams connects streams to a TTY. +// Used by exec too. Should this move somewhere else? +func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + var ( + cStdout, cStderr io.ReadCloser + cStdin io.WriteCloser + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if stdin != nil && openStdin { + cStdin = streamConfig.StdinPipe() + wg.Add(1) + } + + if stdout != nil { + cStdout = streamConfig.StdoutPipe() + wg.Add(1) + } + + if stderr != nil { + cStderr = streamConfig.StderrPipe() + wg.Add(1) + } + + // Connect stdin of container to the http conn. 
+ go func() { + if stdin == nil || !openStdin { + return + } + logrus.Debugf("attach: stdin: begin") + + var err error + if tty { + _, err = copyEscapable(cStdin, stdin, keys) + } else { + _, err = io.Copy(cStdin, stdin) + + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if stdinOnce && !tty { + cStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + } + logrus.Debugf("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := io.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if stdin != nil { + stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", stdout, cStdout) + go attachStream("stderr", stderr, cStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cStdin != nil { + cStdin.Close() + } + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +// Code c/c from io.Copy() modified to handle escape sequence +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + // Default keys : ctrl-p ctrl-q + keys = []byte{16, 17} + } + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + for i, key := range keys { + if nr != 1 || buf[0] != key { + break + } + if i == len(keys)-1 { + if err := src.Close(); err != nil { + return 0, err + } + return 0, nil + } + nr, er = src.Read(buf) + } + // ---- End of docker + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} + +// ShouldRestartOnBoot decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestartOnBoot() bool { + return container.HostConfig.RestartPolicy.Name == "always" || + (container.HostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) || + (container.HostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) +} + +// AddBindMountPoint adds a new bind mount point configuration to the container. +func (container *Container) AddBindMountPoint(name, source, destination string, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: name, + Source: source, + Destination: destination, + RW: rw, + } +} + +// AddLocalMountPoint adds a new local mount point configuration to the container. 
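The detach logic in `copyEscapable` above is easy to miss: the escape sequence (ctrl-p ctrl-q by default, bytes 16 and 17) only triggers when each key arrives as its own single-byte read, in order; anything else is forwarded unchanged. A small sketch of just that matching rule:

```go
package main

import "fmt"

// matchEscape reproduces the rule in copyEscapable: the detach
// sequence counts only when each key arrives as a 1-byte read, in order.
func matchEscape(reads [][]byte, keys []byte) bool {
	for i, key := range keys {
		if i >= len(reads) || len(reads[i]) != 1 || reads[i][0] != key {
			return false
		}
	}
	return true
}

func main() {
	keys := []byte{16, 17} // default: ctrl-p, ctrl-q
	fmt.Println(matchEscape([][]byte{{16}, {17}}, keys))  // true: detach
	fmt.Println(matchEscape([][]byte{{16}, {'a'}}, keys)) // false: forwarded
}
```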
+func (container *Container) AddLocalMountPoint(name, destination string, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: name, + Driver: volume.DefaultDriverName, + Destination: destination, + RW: rw, + } +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. +func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volume.DefaultCopyMode, + } +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre created containers can still have those nil values. +// The non-recommended host configuration in the start api can +// make these fields nil again, this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager(false).(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instace connected to container. +func (container *Container) RestartManager(reset bool) restartmanager.RestartManager { + if reset { + container.RestartCount = 0 + container.restartManager = nil + } + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy) + } + return container.restartManager +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initialize or returns existing context for attach calls to +// track container liveness. 
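The `attachContext` above hands every attach call the same cancellable context, so one `CancelAttachContext` detaches them all. A minimal sketch of that broadcast, using `golang.org/x/net/context` just as the vendored code does:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/net/context"
)

// Two simulated attach calls share one context; a single cancel
// releases both, which is the behavior CancelAttachContext relies on.
func main() {
	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			<-ctx.Done()
			fmt.Printf("attach %d detached\n", n)
		}(i)
	}

	cancel()
	wg.Wait()
}
```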
+func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancel attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go new file mode 100644 index 00000000..754090f9 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_unix.go @@ -0,0 +1,405 @@ +// +build linux freebsd + +package container + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +// DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container +const DefaultSHMSize int64 = 67108864 + +// Container holds the fields specific to unixen implementations. +// See CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool +} + +// CreateDaemonEnvironment returns the list of all environment variables given the list of +// environment variables related to links. +// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. +// The defaults set here do not override the values in container.Config.Env +func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { + // Setup environment + env := []string{ + "PATH=" + system.DefaultPathEnv, + "HOSTNAME=" + container.Config.Hostname, + } + if container.Config.Tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. 
+ env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// appendNetworkMounts appends any network mounts to the array of mount points passed in +func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { + for _, mnt := range container.NetworkMounts() { + dest, err := container.GetResourcePath(mnt.Destination) + if err != nil { + return nil, err + } + volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest}) + } + return volumeMounts, nil +} + +// NetworkMounts returns the list of network mounts. +func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + label.Relabel(container.HostsPath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + return mounts +} + +// CopyImagePathContent copies 
files in destination to the volume. +func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + path, err := v.Mount() + if err != nil { + return err + } + defer v.Unmount() + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.GetRootResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { + if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.HasMountFor("/dev/shm") { + shmPath, err := container.ShmResourcePath() + if err != nil { + logrus.Error(err) + warnings = append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + + if !container.HasMountFor("/dev/shm") { + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: volume.DefaultPropagationMode, + }) + } + + return mounts +} + +// UpdateContainer updates configuration of a container. 
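`UpdateContainer`, whose implementation follows, applies one consistent merge rule: a zero value in the incoming update means "leave the current setting alone", so callers can update a single resource without restating the rest. A cut-down sketch of that rule (the `resources` struct is an illustrative stand-in):

```go
package main

import "fmt"

// resources is a stand-in for the container's resource settings.
type resources struct {
	CPUShares int64
	Memory    int64
}

// merge applies the UpdateContainer rule: only non-zero fields
// from the update overwrite the stored values.
func merge(cur *resources, upd resources) {
	if upd.CPUShares != 0 {
		cur.CPUShares = upd.CPUShares
	}
	if upd.Memory != 0 {
		cur.Memory = upd.Memory
	}
}

func main() {
	cur := resources{CPUShares: 512, Memory: 1 << 30}
	merge(&cur, resources{Memory: 2 << 30})
	fmt.Printf("%+v\n", cur) // CPUShares kept, Memory raised
}
```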
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + cResources.Memory = resources.Memory + } + if resources.MemorySwap != 0 { + cResources.MemorySwap = resources.MemorySwap + } + if resources.MemoryReservation != 0 { + cResources.MemoryReservation = resources.MemoryReservation + } + if resources.KernelMemory != 0 { + cResources.KernelMemory = resources.KernelMemory + } + + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving updated container: %v", err) + return err + } + + return nil +} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { + var ( + volumeMounts []volume.MountPoint + err error + ) + + for _, mntPoint := range container.MountPoints { + dest, err := container.GetResourcePath(mntPoint.Destination) + if err != nil { + return err + } + + volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume}) + } + + // Append any network mounts to the list (this is a no-op on Windows) + if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil { + return err + } + + for _, volumeMount := range volumeMounts { + if forceSyscall { + if err := detachMounted(volumeMount.Destination); err != nil { + logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err) + } + } + + if volumeMount.Volume != nil { + if err := volumeMount.Volume.Unmount(); err != nil { + return err + } + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + } + + return nil +} + +// copyExistingContents copies from the source to the destination and +// ensures the ownership is appropriately set. 
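`detachMounted` above leans on `MNT_DETACH`: the kernel removes the mount point immediately and completes the unmount once nothing holds it open, so a volume pinned by a dying process cannot block container teardown. A standalone sketch of the same call (the path is illustrative and the call needs CAP_SYS_ADMIN):

```go
// +build linux

package main

import (
	"fmt"
	"syscall"
)

// Lazy unmount: detach now, finish when the mount is no longer busy.
func main() {
	if err := syscall.Unmount("/var/lib/docker/volumes/example/_data", syscall.MNT_DETACH); err != nil {
		fmt.Println("unmount:", err)
	}
}
```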
+func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty, copies files from the root into the volume + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + return copyOwnership(source, destination) +} + +// copyOwnership copies the permissions and uid:gid of the source file +// to the destination file +func copyOwnership(source, destination string) error { + stat, err := system.Stat(source) + if err != nil { + return err + } + + if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { + return err + } + + return os.Chmod(destination, os.FileMode(stat.Mode())) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() []Mount { + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + return mounts +} + +// cleanResourcePath cleans a resource path and prepares to combine with mnt path +func cleanResourcePath(path string) string { + return filepath.Join(string(os.PathSeparator), path) +} + +// canMountFS determines if the file system for the container +// can be mounted locally. A no-op on non-Windows platforms +func (container *Container) canMountFS() bool { + return true +} diff --git a/vendor/github.com/docker/docker/container/history.go b/vendor/github.com/docker/docker/container/history.go new file mode 100644 index 00000000..afce1d4a --- /dev/null +++ b/vendor/github.com/docker/docker/container/history.go @@ -0,0 +1,35 @@ +package container + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descendant order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +// Swap switches containers i and j positions in the history. +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +// Add the given container to history. +func (history *History) Add(container *Container) { + *history = append(*history, container) +} + +// sort orders the history by creation date in descendant order. +func (history *History) sort() { + sort.Sort(history) +} diff --git a/vendor/github.com/docker/docker/container/memory_store.go b/vendor/github.com/docker/docker/container/memory_store.go new file mode 100644 index 00000000..30c1f7ad --- /dev/null +++ b/vendor/github.com/docker/docker/container/memory_store.go @@ -0,0 +1,92 @@ +package container + +import "sync" + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. +// It overrides the id if it existed before. 
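The `History` type above gets its newest-first ordering from an inverted `Less`: it asks whether element j was created before element i, so `sort.Sort` yields descending creation time. A compact sketch of the same idiom:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// ctr and byCreation mirror History: Less compares j before i,
// so sort.Sort produces newest-first order.
type ctr struct{ created time.Time }
type byCreation []ctr

func (h byCreation) Len() int           { return len(h) }
func (h byCreation) Less(i, j int) bool { return h[j].created.Before(h[i].created) }
func (h byCreation) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func main() {
	now := time.Now()
	h := byCreation{{now.Add(-time.Hour)}, {now}, {now.Add(-2 * time.Hour)}}
	sort.Sort(h)
	fmt.Println(h[0].created.Equal(now)) // true: newest first
}
```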
+func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + c.RLock() + res := c.s[id] + c.RUnlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. +func (c *memoryStore) List() []*Container { + containers := new(History) + c.RLock() + for _, cont := range c.s { + containers.Add(cont) + } + c.RUnlock() + containers.sort() + return *containers +} + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + c.RLock() + defer c.RUnlock() + for _, cont := range c.s { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every container in the store. +// This operation is asyncronous in the memory store. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. +func (c *memoryStore) ApplyAll(apply StoreReducer) { + c.RLock() + defer c.RUnlock() + + wg := new(sync.WaitGroup) + for _, cont := range c.s { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +var _ Store = &memoryStore{} diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go new file mode 100644 index 00000000..ba82d875 --- /dev/null +++ b/vendor/github.com/docker/docker/container/monitor.go @@ -0,0 +1,60 @@ +package container + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// supervisor defines the interface that a supervisor must implement +type supervisor interface { + // LogContainerEvent generates events related to a given container + LogContainerEvent(*Container, string) + // Cleanup ensures that the container is properly unmounted + Cleanup(*Container) + // StartLogging starts the logging driver for the container + StartLogging(*Container) error + // Run starts a container + Run(c *Container) error + // IsShuttingDown tells whether the supervisor is shutting down or not + IsShuttingDown() bool +} + +// Reset puts a container into a state where it can be restarted again. 
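`ApplyAll` above fans out one goroutine per container under a read lock and joins them with a `WaitGroup`, which is why its contract forbids the reducer from mutating the store. A self-contained sketch of that fan-out shape, accumulating into a separately guarded counter:

```go
package main

import (
	"fmt"
	"sync"
)

// One goroutine per entry, joined with a WaitGroup; the reducer never
// touches the map itself, only its own mutex-guarded state.
func main() {
	entries := map[string]int{"a": 1, "b": 2, "c": 3}

	var (
		wg  sync.WaitGroup
		mu  sync.Mutex
		sum int
	)
	for _, v := range entries {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			mu.Lock()
			sum += n
			mu.Unlock()
		}(v)
	}
	wg.Wait()
	fmt.Println(sum) // 6
}
```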
+func (container *Container) Reset(lock bool) { + if lock { + container.Lock() + defer container.Unlock() + } + + if err := container.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", container.ID, err) + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.NewInputPipes() + } + + if container.LogDriver != nil { + if container.LogCopier != nil { + exit := make(chan struct{}) + go func() { + container.LogCopier.Wait() + close(exit) + }() + select { + case <-time.After(loggerCloseTimeout): + logrus.Warnf("Logger didn't exit in time: logs may be truncated") + case <-exit: + } + } + container.LogDriver.Close() + container.LogCopier = nil + container.LogDriver = nil + } +} diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go new file mode 100644 index 00000000..c52abed2 --- /dev/null +++ b/vendor/github.com/docker/docker/container/mounts_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package container + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Data string `json:"data"` + Propagation string `json:"mountpropagation"` +} diff --git a/vendor/github.com/docker/docker/container/state.go b/vendor/github.com/docker/docker/container/state.go new file mode 100644 index 00000000..a12a193e --- /dev/null +++ b/vendor/github.com/docker/docker/container/state.go @@ -0,0 +1,283 @@ +package container + +import ( + "fmt" + "sync" + "time" + + "github.com/docker/go-units" +) + +// State holds the current container state, and has methods to get and +// set the state. Container has an embed, which allows all of the +// functions defined against State to run against Container. +type State struct { + sync.Mutex + // FIXME: Why do we have both paused and running if a + // container cannot be paused and running at the same time? + Running bool + Paused bool + Restarting bool + OOMKilled bool + RemovalInProgress bool // Not need for this to be persistent on disk. + Dead bool + Pid int + ExitCode int + Error string // contains last known error when starting the container + StartedAt time.Time + FinishedAt time.Time + waitChan chan struct{} +} + +// NewState creates a default state object with a fresh channel for state changes. 
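The `waitChan` field in `State` above implements a broadcast: closing the channel wakes every goroutine blocked on it at once, and a fresh channel is installed for the next transition. A minimal sketch of that idiom in isolation:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// notifier shows the waitChan idiom: close wakes all waiters,
// then a new channel is installed for the next state change.
type notifier struct {
	mu sync.Mutex
	ch chan struct{}
}

func (n *notifier) wait() <-chan struct{} {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.ch
}

func (n *notifier) fire() {
	n.mu.Lock()
	close(n.ch) // broadcast to every waiter
	n.ch = make(chan struct{})
	n.mu.Unlock()
}

func main() {
	n := &notifier{ch: make(chan struct{})}
	go func() {
		time.Sleep(10 * time.Millisecond)
		n.fire()
	}()
	<-n.wait()
	fmt.Println("woken")
}
```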
+func NewState() *State { + return &State{ + waitChan: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// IsValidStateString checks if the provided string is a valid container state or not. +func IsValidStateString(s string) bool { + if s != "paused" && + s != "restarting" && + s != "running" && + s != "dead" && + s != "created" && + s != "exited" { + return false + } + return true +} + +func wait(waitChan <-chan struct{}, timeout time.Duration) error { + if timeout < 0 { + <-waitChan + return nil + } + select { + case <-time.After(timeout): + return fmt.Errorf("Timed out: %v", timeout) + case <-waitChan: + return nil + } +} + +// WaitRunning waits until state is running. If state is already +// running it returns immediately. If you want wait forever you must +// supply negative timeout. Returns pid, that was passed to +// SetRunning. +func (s *State) WaitRunning(timeout time.Duration) (int, error) { + s.Lock() + if s.Running { + pid := s.Pid + s.Unlock() + return pid, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + return s.GetPID(), nil +} + +// WaitStop waits until state is stopped. If state already stopped it returns +// immediately. If you want wait forever you must supply negative timeout. +// Returns exit code, that was passed to SetStoppedLocking +func (s *State) WaitStop(timeout time.Duration) (int, error) { + s.Lock() + if !s.Running { + exitCode := s.ExitCode + s.Unlock() + return exitCode, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + return s.getExitCode(), nil +} + +// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +// GetPID holds the process id of a container. +func (s *State) GetPID() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +func (s *State) getExitCode() int { + s.Lock() + res := s.ExitCode + s.Unlock() + return res +} + +// SetRunning sets the state of the container to "running". 
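The human-readable statuses built by `String` above ("Up 2 minutes", "Exited (0) 3 hours ago") come from the go-units package, which this vendoring pins in trash.conf. A small sketch of the same call:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/go-units"
)

// HumanDuration renders a duration the way `docker ps` status lines do.
func main() {
	started := time.Now().UTC().Add(-90 * time.Second)
	fmt.Printf("Up %s\n", units.HumanDuration(time.Now().UTC().Sub(started)))
	// Up About a minute
}
```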
+func (s *State) SetRunning(pid int, initial bool) { + s.Error = "" + s.Running = true + s.Paused = false + s.Restarting = false + s.ExitCode = 0 + s.Pid = pid + if initial { + s.StartedAt = time.Now().UTC() + } + close(s.waitChan) // fire waiters for start + s.waitChan = make(chan struct{}) +} + +// SetStoppedLocking locks the container state is sets it to "stopped". +func (s *State) SetStoppedLocking(exitStatus *ExitStatus) { + s.Lock() + s.SetStopped(exitStatus) + s.Unlock() +} + +// SetStopped sets the container state to "stopped" without locking. +func (s *State) SetStopped(exitStatus *ExitStatus) { + s.Running = false + s.Paused = false + s.Restarting = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetRestartingLocking is when docker handles the auto restart of containers when they are +// in the middle of a stop and being restarted again +func (s *State) SetRestartingLocking(exitStatus *ExitStatus) { + s.Lock() + s.SetRestarting(exitStatus) + s.Unlock() +} + +// SetRestarting sets the container state to "restarting". +// It also sets the container PID to 0. +func (s *State) SetRestarting(exitStatus *ExitStatus) { + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetError sets the container's error state. This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) SetError(err error) { + s.Error = err.Error() +} + +// IsPaused returns whether the container is paused or not. +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} + +// IsRestarting returns whether the container is restarting or not. +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +// SetRemovalInProgress sets the container state as being removed. +// It returns true if the container was already in that state. +func (s *State) SetRemovalInProgress() bool { + s.Lock() + defer s.Unlock() + if s.RemovalInProgress { + return true + } + s.RemovalInProgress = true + return false +} + +// ResetRemovalInProgress make the RemovalInProgress state to false. +func (s *State) ResetRemovalInProgress() { + s.Lock() + s.RemovalInProgress = false + s.Unlock() +} + +// SetDead sets the container state to "dead" +func (s *State) SetDead() { + s.Lock() + s.Dead = true + s.Unlock() +} diff --git a/vendor/github.com/docker/docker/container/state_unix.go b/vendor/github.com/docker/docker/container/state_unix.go new file mode 100644 index 00000000..8d25a237 --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_unix.go @@ -0,0 +1,10 @@ +// +build linux freebsd + +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. 
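Note the locking split in the setters above: `SetRunning` expects the caller to hold the state lock, while the `*Locking` variants take it themselves, and `WaitStop` with a negative timeout blocks until the stop transition fires the waiters. A usage sketch against the vendored package, under those assumptions:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/container"
)

// Start, then stop from another goroutine; WaitStop(-1) waits forever.
func main() {
	s := container.NewState()

	s.Lock() // SetRunning does not lock for itself
	s.SetRunning(1234, true)
	s.Unlock()

	go s.SetStoppedLocking(&container.ExitStatus{ExitCode: 0})

	code, err := s.WaitStop(-1)
	fmt.Println(code, err) // 0 <nil>
}
```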
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCode = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled +} diff --git a/vendor/github.com/docker/docker/container/store.go b/vendor/github.com/docker/docker/container/store.go new file mode 100644 index 00000000..042fb1a3 --- /dev/null +++ b/vendor/github.com/docker/docker/container/store.go @@ -0,0 +1,28 @@ +package container + +// StoreFilter defines a function to filter +// container in the store. +type StoreFilter func(*Container) bool + +// StoreReducer defines a function to +// manipulate containers in the store +type StoreReducer func(*Container) + +// Store defines an interface that +// any container store must implement. +type Store interface { + // Add appends a new container to the store. + Add(string, *Container) + // Get returns a container from the store by the identifier it was stored with. + Get(string) *Container + // Delete removes a container from the store by the identifier it was stored with. + Delete(string) + // List returns a list of containers from the store. + List() []*Container + // Size returns the number of containers in the store. + Size() int + // First returns the first container found in the store by a given filter. + First(StoreFilter) *Container + // ApplyAll calls the reducer function with every container in the store. + ApplyAll(StoreReducer) +} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default.go b/vendor/github.com/docker/docker/daemon/apparmor_default.go new file mode 100644 index 00000000..e4065b4a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default.go @@ -0,0 +1,30 @@ +// +build linux + +package daemon + +import ( + "github.com/Sirupsen/logrus" + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func installDefaultAppArmorProfile() { + if apparmor.IsEnabled() { + if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + apparmorProfiles := []string{defaultApparmorProfile} + + // Allow daemon to run if loading failed, but are active + // (possibly through another run, manually, or via system startup) + for _, policy := range apparmorProfiles { + if err := aaprofile.IsLoaded(policy); err != nil { + logrus.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy) + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go new file mode 100644 index 00000000..f186a68a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package daemon + +func installDefaultAppArmorProfile() { +} diff --git a/vendor/github.com/docker/docker/daemon/archive.go b/vendor/github.com/docker/docker/daemon/archive.go new file mode 100644 index 00000000..fb023689 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive.go @@ -0,0 +1,432 @@ +package daemon + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/engine-api/types" +) + +// ErrExtractPointNotDirectory is used to convey 
that the operation to extract +// a tar archive to a directory in a container has failed because the specified +// path does not refer to a directory. +var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") + +// ErrRootFSReadOnly is returned when a container's rootfs is marked read-only. +var ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") + +// ContainerCopy performs a deprecated operation of archiving the resource at +// the specified path in the container identified by the given name. +func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if res[0] == '/' || res[0] == '\\' { + res = res[1:] + } + + return daemon.containerCopy(container, res) +} + +// ContainerStatPath stats the filesystem resource at the specified path in the +// container identified by the given name. +func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + return daemon.containerStatPath(container, path) +} + +// ContainerArchivePath creates an archive of the filesystem resource at the +// specified path in the container identified by the given name. Returns a +// tar archive of the resource and whether it was a directory or a single file. +func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, nil, err + } + + return daemon.containerArchivePath(container, path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. +func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) +} + +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. +func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.UnmountVolumes(true, daemon.LogVolumeEvent) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, err + } + + return container.StatPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource.
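// Note that the container lock and mounts are deliberately held until the
// returned stream is closed. A minimal sketch of that wrapper pattern
// (hypothetical names, standard library only; cleanup stands in for the
// unmount/unlock steps):
//
//	type cleanupReader struct {
//		io.ReadCloser
//		cleanup func()
//	}
//
//	// Close closes the wrapped archive first, then releases the resources
//	// that were pinned while the archive was being streamed.
//	func (c *cleanupReader) Close() error {
//		err := c.ReadCloser.Close()
//		c.cleanup()
//		return err
//	}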
+func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.UnmountVolumes(true, daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.UnmountVolumes(true, daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.UnmountVolumes(true, daemon.LogVolumeEvent) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. 
+ resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + uid, gid := daemon.GetRemappedUIDGID() + options := &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &archive.TarChownOptions{ + UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? + }, + } + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.UnmountVolumes(true, daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.UnmountVolumes(true, daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} + +// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container +// specified by a container object. +// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). +// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. +func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { + srcPath := src.Path() + destExists := true + destDir := false + rootUID, rootGID := daemon.GetRemappedUIDGID() + + // Work in daemon-local OS specific file paths + destPath = filepath.FromSlash(destPath) + + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(c) + if err != nil { + return err + } + defer daemon.Unmount(c) + + dest, err := c.GetResourcePath(destPath) + if err != nil { + return err + } + + // Preserve the trailing slash + // TODO: why are we appending another path separator if there was already one? + if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { + destDir = true + dest += string(os.PathSeparator) + } + + destPath = dest + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) + return err + } + destExists = false + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archiver := &archive.Archiver{ + Untar: chrootarchive.Untar, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + + if src.IsDir() { + // copy as directory + if err := archiver.CopyWithTar(srcPath, destPath); err != nil { + return err + } + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) + } + if decompress && archive.IsArchivePath(srcPath) { + // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. 
First we need to strip off the archive's + // filename from the path but this is only added if it does not end in slash + tarDest := destPath + if strings.HasSuffix(tarDest, string(os.PathSeparator)) { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + err := archiver.UntarPath(srcPath, tarDest) + /* + if err != nil { + logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) + } + */ + return err + } + + // only needed for fixPermissions, but might as well put it before CopyFileWithTar + if destDir || (destExists && destStat.IsDir()) { + destPath = filepath.Join(destPath, src.Name()) + } + + if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { + return err + } + if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { + return err + } + + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) +} diff --git a/vendor/github.com/docker/docker/daemon/archive_unix.go b/vendor/github.com/docker/docker/daemon/archive_unix.go new file mode 100644 index 00000000..fcea13c9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_unix.go @@ -0,0 +1,57 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/container" + "os" + "path/filepath" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. 
+ cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) + }) +} diff --git a/vendor/github.com/docker/docker/daemon/attach.go b/vendor/github.com/docker/docker/daemon/attach.go new file mode 100644 index 00000000..79e9cd51 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/attach.go @@ -0,0 +1,120 @@ +package daemon + +import ( + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/stdcopy" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. +func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName) + return errors.NewRequestConflictError(err) + } + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + var stdin io.ReadCloser + var stdout, stderr io.Writer + + if c.UseStdin { + stdin = inStream + } + if c.UseStdout { + stdout = outStream + } + if c.UseStderr { + stderr = errStream + } + + if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, c.DetachKeys); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) +} + +func (daemon *Daemon) containerAttach(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { + if logs { + logDriver, err := daemon.getLogger(container) + if err != nil { + return err + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && stdout != nil { + stdout.Write(msg.Line) + } + if msg.Source == "stderr" && stderr != nil { + stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(container, "attach") + + //stream + if stream { + var stdinPipe io.ReadCloser + if stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + io.Copy(w, stdin) + }() + stdinPipe = r + } + <-container.Attach(stdinPipe, stdout, stderr, keys) + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if container.Config.StdinOnce && !container.Config.Tty { + container.WaitStop(-1 * time.Second) + } + } + return nil +} 
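When the container has no TTY and MuxStreams is set, stdout and stderr are interleaved on a single connection using the stdcopy framing shown above. A self-contained sketch of how a consumer splits that stream back apart with the same vendored package (the in-memory buffer stands in for the attach connection; treat it as illustrative, not as the daemon's own wiring):

package main

import (
	"bytes"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Simulate the muxed wire format: one stdout frame, then one stderr frame.
	var muxed bytes.Buffer
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("hello from stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("hello from stderr\n"))

	// StdCopy reads the framed stream and demultiplexes it onto two writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &muxed); err != nil {
		panic(err)
	}
}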
diff --git a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go new file mode 100644 index 00000000..c99485f5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go @@ -0,0 +1,131 @@ +// +build !windows + +package caps + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/stringutils" + "github.com/syndtr/gocapability/capability" +) + +var capabilityList Capabilities + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capabilityList = append(capabilityList, + &CapabilityMapping{ + Key: "CAP_" + strings.ToUpper(cap.String()), + Value: cap, + }, + ) + } +} + +type ( + // CapabilityMapping maps a linux capability name to its value of capability.Cap type. + // Capabilities is one of the security systems in the Linux Security Module (LSM) + // framework provided by the kernel. + // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + // Capabilities contains all CapabilityMapping + Capabilities []*CapabilityMapping +) + +// String returns the capability key of the CapabilityMapping. +func (c *CapabilityMapping) String() string { + return c.Key +} + +// GetCapability returns the CapabilityMapping that contains the specified key. +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +// GetAllCapabilities returns all of the capabilities +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// TweakCapabilities tweaks the set of capabilities by adding or dropping +// capabilities relative to the basic (default) capabilities. +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = GetAllCapabilities() + ) + + // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix + // Currently they are mixed in here. We should do conversion in one place.
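// Worked example (illustrative): with the default basics list,
// TweakCapabilities(basics, []string{"SYS_ADMIN"}, []string{"CHOWN"}) returns
// the defaults minus CAP_CHOWN plus CAP_SYS_ADMIN. An add of "all" substitutes
// the full capability list for basics, and a drop of "all" clears basics so
// that only the explicit adds remain.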
+ + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + + if !stringutils.InSlice(allCaps, "CAP_"+cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if stringutils.InSlice(adds, "all") { + basics = allCaps + } + + if !stringutils.InSlice(drops, "all") { + for _, cap := range basics { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !stringutils.InSlice(drops, cap[4:]) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + cap = "CAP_" + cap + + if !stringutils.InSlice(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !stringutils.InSlice(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + return newCaps, nil +} diff --git a/vendor/github.com/docker/docker/daemon/changes.go b/vendor/github.com/docker/docker/daemon/changes.go new file mode 100644 index 00000000..5bc5b9d5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/changes.go @@ -0,0 +1,15 @@ +package daemon + +import "github.com/docker/docker/pkg/archive" + +// ContainerChanges returns a list of container fs changes +func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + return daemon.changes(container) +} diff --git a/vendor/github.com/docker/docker/daemon/commit.go b/vendor/github.com/docker/docker/daemon/commit.go new file mode 100644 index 00000000..7cdf80c7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/commit.go @@ -0,0 +1,233 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "runtime" + "strings" + "time" + + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + "github.com/docker/go-connections/nat" +) + +// merge merges two Config, the image container configuration (defaults values), +// and the user container configuration, either passed by the API or generated +// by the cli. +// It will mutate the specified user configuration (userConf) with the image +// configuration where the user configuration is incomplete. 
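// A worked example of the semantics below (illustrative values): with
// userConf.Env = ["A=1"] and imageConf.Env = ["A=2", "B=3"], the merged result
// is ["A=1", "B=3"]: the user value wins per key and image values fill the
// gaps. Exposed ports are unioned the same way, and user labels override image
// labels key by key.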
+func merge(userConf, imageConf *containertypes.Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + if imageConf.Labels != nil { + for l := range userConf.Labels { + imageConf.Labels[l] = userConf.Labels[l] + } + userConf.Labels = imageConf.Labels + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } + } + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository. 
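// A hedged usage sketch from the API client side (engine-api as vendored in
// this tree; treat the exact option fields as illustrative, since they vary
// across engine-api versions):
//
//	resp, err := cli.ContainerCommit(context.Background(), types.ContainerCommitOptions{
//		ContainerID:    "mycontainer",
//		RepositoryName: "example/snapshot",
//		Tag:            "v1",
//		Comment:        "state after provisioning",
//		Pause:          true,
//	})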
+func (daemon *Daemon) Commit(name string, c *types.ContainerCommitConfig) (string, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + // It is not possible to commit a running container on Windows + if runtime.GOOS == "windows" && container.IsRunning() { + return "", fmt.Errorf("Windows does not support commit of a running container") + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + if c.MergeConfigs { + if err := merge(c.Config, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var history []image.History + rootFS := image.NewRootFS() + + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + history = img.History + rootFS = img.RootFS + } + + l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + h := image.History{ + Author: c.Author, + Created: time.Now().UTC(), + CreatedBy: strings.Join(container.Config.Cmd, " "), + Comment: c.Comment, + EmptyLayer: true, + } + + if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { + h.EmptyLayer = false + rootFS.Append(diffID) + } + + history = append(history, h) + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: c.Config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Container: container.ID, + ContainerConfig: *container.Config, + Author: c.Author, + Created: h.Created, + }, + RootFS: rootFS, + History: history, + }) + + if err != nil { + return "", err + } + + id, err := daemon.imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + if c.Repo != "" { + newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImage(newTag, id.String()); err != nil { + return "", err + } + } + + attributes := map[string]string{ + "comment": c.Comment, + } + daemon.LogContainerEventWithAttributes(container, "commit", attributes) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/vendor/github.com/docker/docker/daemon/config.go b/vendor/github.com/docker/docker/daemon/config.go new file mode 100644 index 00000000..c293935c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config.go @@ -0,0 +1,358 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + 
"github.com/docker/docker/registry" + "github.com/imdario/mergo" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which are +// common across platforms. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type CommonConfig struct { + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + ExecRoot string `json:"exec-root,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + Root string `json:"graph,omitempty"` + SocketGroup string `json:"group,omitempty"` + TrustKeyPath string `json:"-"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + LogConfig + bridgeConfig // bridgeConfig holds bridge network specific configuration. 
+ registry.ServiceOptions + + reloadLock sync.Mutex + valuesSet map[string]interface{} +} + +// InstallCommonFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. +func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) { + config.ServiceOptions.InstallCliFlags(cmd, usageFn) + + cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options")) + cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last")) + cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set runtime execution options")) + cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file")) + cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime")) + cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root directory for execution state files")) + cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run")) + cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use")) + cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU")) + cmd.BoolVar(&config.RawLogs, []string{"-raw-logs"}, false, usageFn("Full timestamps without ANSI coloring")) + // FIXME: why the inconsistency between "hosts" and "sockets"? + cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) + cmd.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use")) + cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) + cmd.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon")) + cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs")) + cmd.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options")) + cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise")) + cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("Set the cluster store")) + cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options")) +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (config *Config) IsValueSet(name string) bool { + if config.valuesSet == nil { + return false + } + _, ok := config.valuesSet[name] + return ok +} + +// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. 
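// A typical caller wires this to SIGHUP. A minimal sketch (the config path,
// flag set, and reload callback are illustrative, not taken from this tree):
//
//	c := make(chan os.Signal, 1)
//	signal.Notify(c, syscall.SIGHUP)
//	go func() {
//		for range c {
//			err := ReloadConfiguration("/etc/docker/daemon.json", flags, func(cfg *Config) {
//				// swap the validated configuration into the running daemon
//			})
//			if err != nil {
//				logrus.Error(err)
//			}
//		}
//	}()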
+func ReloadConfiguration(configFile string, flags *flag.FlagSet, reload func(*Config)) error { + logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) + newConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return err + } + + if err := validateConfiguration(newConfig); err != nil { + return fmt.Errorf("file configuration validation failed (%v)", err) + } + + reload(newConfig) + return nil +} + +// boolValue is an interface that boolean value flags implement +// to tell the command line how to make -name equivalent to -name=true. +type boolValue interface { + IsBoolFlag() bool +} + +// MergeDaemonConfigurations reads a configuration file, +// loads the file configuration in an isolated structure, +// and merges the configuration provided from flags on top +// if there are no conflicts. +func MergeDaemonConfigurations(flagsConfig *Config, flags *flag.FlagSet, configFile string) (*Config, error) { + fileConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return nil, err + } + + if err := validateConfiguration(fileConfig); err != nil { + return nil, fmt.Errorf("file configuration validation failed (%v)", err) + } + + // merge flags configuration on top of the file configuration + if err := mergo.Merge(fileConfig, flagsConfig); err != nil { + return nil, err + } + + return fileConfig, nil +} + +// getConflictFreeConfiguration loads the configuration from a JSON file. +// It compares that configuration with the one provided by the flags, +// and returns an error if there are conflicts. +func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Config, error) { + b, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + + var config Config + var reader io.Reader + if flags != nil { + var jsonConfig map[string]interface{} + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { + return nil, err + } + + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { + return nil, err + } + + // Override flag values to make sure the values set in the config file with nullable values, like `false`, + // are not overridden by default truthy values from the flags that were not explicitly set. + // See https://github.com/docker/docker/issues/20289 for an example. + // + // TODO: Rewrite configuration logic to avoid the same issue with other nullable values, like numbers. + namedOptions := make(map[string]interface{}) + for key, value := range configSet { + f := flags.Lookup("-" + key) + if f == nil { // ignore named flags that don't match + namedOptions[key] = value + continue + } + + if _, ok := f.Value.(boolValue); ok { + f.Value.Set(fmt.Sprintf("%v", value)) + } + } + if len(namedOptions) > 0 { + // also set the default for named flags that are boolValue at the same time. + flags.VisitAll(func(f *flag.Flag) { + if opt, named := f.Value.(opts.NamedOption); named { + v, set := namedOptions[opt.Name()] + _, boolean := f.Value.(boolValue) + if set && boolean { + f.Value.Set(fmt.Sprintf("%v", v)) + } + } + }) + } + + config.valuesSet = configSet + } + + reader = bytes.NewReader(b) + err = json.NewDecoder(reader).Decode(&config) + return &config, err +} + +// configValuesSet returns the configuration values explicitly set in the file.
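// Worked example (illustrative): {"labels": ["a=b"], "log-opts": {"max-size":
// "10m"}} is returned as the same two top-level keys, because "log-opts" is
// listed in flatOptions; a map under any other key would instead be flattened
// into its child keys so they can be matched against flag names.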
+func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + flagName := "-" + key + if flag := flags.Lookup(flagName); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *flag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. + duplicatedConflicts := func(f *flag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload without trailing dashes + for _, name := range f.Names { + name = strings.TrimLeft(name, "-") + + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// validateConfiguration validates some specific configs. 
+// such as config.DNS, config.Labels, config.DNSSearch +func validateConfiguration(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_experimental.go b/vendor/github.com/docker/docker/daemon/config_experimental.go new file mode 100644 index 00000000..ceb7c382 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_experimental.go @@ -0,0 +1,8 @@ +// +build experimental + +package daemon + +import flag "github.com/docker/docker/pkg/mflag" + +func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { +} diff --git a/vendor/github.com/docker/docker/daemon/config_stub.go b/vendor/github.com/docker/docker/daemon/config_stub.go new file mode 100644 index 00000000..796e6b6e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_stub.go @@ -0,0 +1,8 @@ +// +build !experimental + +package daemon + +import flag "github.com/docker/docker/pkg/mflag" + +func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { +} diff --git a/vendor/github.com/docker/docker/daemon/config_unix.go b/vendor/github.com/docker/docker/daemon/config_unix.go new file mode 100644 index 00000000..5394949e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_unix.go @@ -0,0 +1,88 @@ +// +build linux freebsd + +package daemon + +import ( + "net" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // Fields below here are platform specific. + + CorsHeaders string `json:"api-cors-headers,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + CgroupParent string `json:"cgroup-parent,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + ContainerdAddr string `json:"containerd,omitempty"` +} + +// bridgeConfig stores all the bridge driver specific +// configuration. 
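// For reference, the json tags below map to daemon.json keys; an illustrative
// (not prescriptive) snippet:
//
//	{
//		"bip": "172.26.0.1/16",
//		"fixed-cidr": "172.26.0.0/17",
//		"ipv6": false,
//		"iptables": true,
//		"userland-proxy": false
//	}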
+type bridgeConfig struct { + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-mask,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + DefaultIP net.IP `json:"ip,omitempty"` + Iface string `json:"bridge,omitempty"` + IP string `json:"bip,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. +func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(cmd, usageFn) + + // Then platform-specific install flags + cmd.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, usageFn("Enable selinux support")) + cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", usageFn("Group for the unix socket")) + config.Ulimits = make(map[string]*units.Ulimit) + cmd.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), []string{"-default-ulimit"}, usageFn("Set default ulimits for containers")) + cmd.BoolVar(&config.bridgeConfig.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) + cmd.BoolVar(&config.bridgeConfig.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) + cmd.BoolVar(&config.bridgeConfig.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) + cmd.BoolVar(&config.bridgeConfig.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) + cmd.StringVar(&config.bridgeConfig.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) + cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) + cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) + cmd.StringVar(&config.bridgeConfig.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) + cmd.BoolVar(&config.bridgeConfig.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) + cmd.BoolVar(&config.bridgeConfig.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) + cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) + cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) + 
cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers")) + cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces")) + cmd.StringVar(&config.ContainerdAddr, []string{"-containerd"}, "", usageFn("Path to containerd socket")) + + config.attachExperimentalFlags(cmd, usageFn) +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_unix.go b/vendor/github.com/docker/docker/daemon/container_operations_unix.go new file mode 100644 index 00000000..e4ed08c4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_unix.go @@ -0,0 +1,319 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/specs/specs-go" +) + +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + for _, envVar := range link.ToEnv() { + env = append(env, envVar) + } + } + + return env, nil +} + +// getSize returns the real size & virtual size of the container. +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + if err := daemon.Mount(container); err != nil { + logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer daemon.Unmount(container) + + sizeRw, err = container.RWLayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + daemon.GraphDriverName(), container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. 
+ sizeRw = -1 + } + + if parent := container.RWLayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootUID, rootGID := daemon.GetRemappedUIDGID() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + return err + } + + shmSize := container.DefaultSHMSize + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) mountVolumes(container *container.Container) error { + mounts, err := daemon.setupMounts(container) + if err != nil { + return err + } + + for _, m := range mounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + return err + } + + var stat os.FileInfo + stat, err = os.Stat(m.Source) + if err != nil { + return err + } + if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + + opts := "rbind,ro" + if m.Writable { + opts = "rbind,rw" + } + + if err := mount.Mount(m.Source, dest, "bind", opts); err != nil { + return err + } + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + if err != syscall.ESRCH { + return err + } + e := errNoSuchProcess{pid, 9} + logrus.Debug(e) + return e + } + } + } + return nil +} + +func specDevice(d *configs.Device) specs.Device { + return specs.Device{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func specDeviceCgroup(d *configs.Device) specs.DeviceCgroup { + t := string(d.Type) + return specs.DeviceCgroup{ + Allow: true, + Type: 
&t, + Major: &d.Major, + Minor: &d.Minor, + Access: &d.Permissions, + } +} + +func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + resolvedPathOnHost := deviceMapping.PathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(deviceMapping.PathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := os.Readlink(deviceMapping.PathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, deviceMapping.CgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = deviceMapping.PathInContainer + return append(devs, specDevice(device)), append(devPermissions, specDeviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, deviceMapping.PathInContainer, 1) + devs = append(devs, specDevice(childDevice)) + devPermissions = append(devPermissions, specDeviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) +} + +func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device { + if len(userDevices) == 0 { + return defaultDevices + } + + paths := map[string]*configs.Device{} + for _, d := range userDevices { + paths[d.Path] = d + } + + var devs []*configs.Device + for _, d := range defaultDevices { + if _, defined := paths[d.Path]; !defined { + devs = append(devs, d) + } + } + return append(devs, userDevices...) 
+} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} diff --git a/vendor/github.com/docker/docker/daemon/create.go b/vendor/github.com/docker/docker/daemon/create.go new file mode 100644 index 00000000..34f0aa2d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create.go @@ -0,0 +1,185 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + volumestore "github.com/docker/docker/volume/store" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" + "github.com/opencontainers/runc/libcontainer/label" +) + +// ContainerCreate creates a container. +func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) { + if params.Config == nil { + return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return types.ContainerCreateResponse{}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, err + } + + container, err := daemon.create(params) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + + return types.ContainerCreateResponse{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from the given configuration with a given name. +func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + if params.Config.Image != "" { + img, err = daemon.GetImage(params.Config.Image) + if err != nil { + return nil, err + } + imgID = img.ID() + } + + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err + } + + if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("Clean up Error! 
Cannot destroy container %s: %v", container.ID, err) + } + } + }() + + if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { + return nil, err + } + + // Set RWLayer for container after mount labels have been set + if err := daemon.setRWLayer(container); err != nil { + return nil, err + } + + if err := daemon.Register(container); err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.removeMountPoints(container, true); err != nil { + logrus.Error(err) + } + } + }() + + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + return nil, err + } + + var endpointsConfigs map[string]*networktypes.EndpointSettings + if params.NetworkingConfig != nil { + endpointsConfigs = params.NetworkingConfig.EndpointsConfig + } + + if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil { + return nil, err + } + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving new container to disk: %v", err) + return nil, err + } + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode) ([]string, error) { + if ipcMode.IsHost() || pidMode.IsHost() { + return label.DisableSecOpt(), nil + } + if ipcContainer := ipcMode.Container(); ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + + return label.DupSecOpt(c.ProcessLabel), nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() + } + rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.setupInitLayer) + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the remote API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + if volumestore.IsNameConflict(err) { + return nil, fmt.Errorf("A volume named %s already exists. 
Choose a different volume name.", name) + } + return nil, err + } + + daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + return volumeToAPIType(v), nil +} diff --git a/vendor/github.com/docker/docker/daemon/create_unix.go b/vendor/github.com/docker/docker/daemon/create_unix.go new file mode 100644 index 00000000..37c4a911 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create_unix.go @@ -0,0 +1,76 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + + for spec := range config.Volumes { + name := stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + return err + } + + container.AddMountPointWithVolume(destination, v, true) + } + return daemon.populateVolumes(container) +} + +// populateVolumes copies data from the container's rootfs into the volume for non-binds. +// this is only called when the container is created. +func (daemon *Daemon) populateVolumes(c *container.Container) error { + for _, mnt := range c.MountPoints { + if !mnt.CopyData || mnt.Volume == nil { + continue + } + + logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon.go b/vendor/github.com/docker/docker/daemon/daemon.go new file mode 100644 index 00000000..f2499d78 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon.go @@ -0,0 +1,1536 @@ +// Package daemon exposes the functions that occur on the host server +// that the Docker daemon is running. +// +// In implementing the various functions of the daemon, there is often +// a method-specific struct for configuring the runtime behavior. 
+package daemon + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/errors" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + eventtypes "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + networktypes "github.com/docker/engine-api/types/network" + registrytypes "github.com/docker/engine-api/types/registry" + "github.com/docker/engine-api/types/strslice" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/distribution" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/tarexport" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +const ( + // maxDownloadConcurrency is the maximum number of downloads that + // may take place at a time for each pull. + maxDownloadConcurrency = 3 + // maxUploadConcurrency is the maximum number of uploads that + // may take place at a time for each push. + maxUploadConcurrency = 5 +) + +var ( + validContainerNameChars = utils.RestrictedNameChars + validContainerNamePattern = utils.RestrictedNamePattern + + errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + RefOrID string +} + +func (e ErrImageDoesNotExist) Error() string { + return fmt.Sprintf("no such id: %s", e.RefOrID) +} + +// Daemon holds information about the Docker daemon. 
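The `ErrImageDoesNotExist` type above exists so callers can tell a plain cache miss apart from other failures with a type assertion rather than string matching. A minimal caller-side sketch (the `ensureImage` helper and the image reference are hypothetical, not part of the vendored code):

```go
func ensureImage(daemon *Daemon, ref string) (*image.Image, error) {
	img, err := daemon.GetImage(ref) // e.g. ref = "busybox:latest"
	if err != nil {
		if missing, ok := err.(ErrImageDoesNotExist); ok {
			// Specifically "no such image", as opposed to a corrupt
			// store or an I/O failure.
			return nil, fmt.Errorf("image %s not found locally", missing.RefOrID)
		}
		return nil, err
	}
	return img, nil
}
```

The `Daemon` struct defined next gathers the stores this lookup depends on.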
+type Daemon struct { + ID string + repository string + containers container.Store + execCommands *exec.Store + referenceStore reference.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + distributionMetadataStore dmetadata.Store + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *Config + statsCollector *statsCollector + defaultLogConfig containertypes.LogConfig + RegistryService *registry.Service + EventsService *events.Events + volumes *store.VolumeStore + root string + seccompEnabled bool + shutdown bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + layerStore layer.Store + imageStore image.Store + nameIndex *registrar.Registrar + linkIndex *linkIndex + containerd libcontainerd.Client + defaultIsolation containertypes.Isolation // Default isolation mode on Windows +} + +// GetContainer looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exact match a container in daemon's list +// - A container name, which will only exact match via the GetByName() function +// - A partial container ID prefix (e.g. short ID) of any length that is +// unique enough to only return a single container object +// If none of these searches succeed, an error is returned +func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { + if len(prefixOrName) == 0 { + return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) + } + + if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { + // prefix is an exact match to a full container ID + return containerByID, nil + } + + // GetByName will match only an exact name provided; we ignore errors + if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { + // prefix is an exact match to a full container Name + return containerByName, nil + } + + containerID, indexError := daemon.idIndex.Get(prefixOrName) + if indexError != nil { + // When truncindex defines an error type, use that instead + if indexError == truncindex.ErrNotExist { + err := fmt.Errorf("No such container: %s", prefixOrName) + return nil, errors.NewRequestNotFoundError(err) + } + return nil, indexError + } + return daemon.containers.Get(containerID), nil +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. +func (daemon *Daemon) Exists(id string) bool { + c, _ := daemon.GetContainer(id) + return c != nil +} + +// IsPaused returns a bool indicating if the specified container is paused. +func (daemon *Daemon) IsPaused(id string) bool { + c, _ := daemon.GetContainer(id) + return c.State.IsPaused() +} + +func (daemon *Daemon) containerRoot(id string) string { + return filepath.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. 
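`GetContainer` above resolves its argument in three passes: exact ID, exact name, then unambiguous ID prefix through the trunc index. What that means at the call site, as a sketch with invented values:

```go
byID, _ := daemon.GetContainer("3f4e8a0d...") // full 64-hex ID (elided here)
byName, _ := daemon.GetContainer("/web")      // exact name, via GetByName
byShort, _ := daemon.GetContainer("3f4e")     // unambiguous prefix, via idIndex
// If the values line up, all three return the same *container.Container;
// an ambiguous prefix or unknown name produces an error instead.
_, _, _ = byID, byName, byShort
```

`load`, next, is the startup-time counterpart that reads a single container back from disk.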
+func (daemon *Daemon) load(id string) (*container.Container, error) { + container := daemon.newBaseContainer(id) + + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + return container, nil +} + +func (daemon *Daemon) registerName(container *container.Container) error { + if daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving container name to disk: %v", err) + } + } + return daemon.nameIndex.Reserve(container.Name, container.ID) +} + +// Register makes a container object usable by the daemon as <container.ID> +func (daemon *Daemon) Register(c *container.Container) error { + // Attach to stdout and stderr + if c.Config.OpenStdin { + c.NewInputPipes() + } else { + c.NewNopInputPipe() + } + + daemon.containers.Add(c.ID, c) + daemon.idIndex.Add(c.ID) + + return nil +} + +func (daemon *Daemon) restore() error { + var ( + debug = utils.IsDebugEnabled() + currentDriver = daemon.GraphDriverName() + containers = make(map[string]*container.Container) + ) + + if !debug { + logrus.Info("Loading containers: start.") + } + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if !debug && logrus.GetLevel() == logrus.InfoLevel { + fmt.Print(".") + } + if err != nil { + logrus.Errorf("Failed to load container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) + if err != nil { + logrus.Errorf("Failed to load container mount %v: %v", id, err) + continue + } + container.RWLayer = rwlayer + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + restartContainers := make(map[*container.Container]chan struct{}) + for _, c := range containers { + if err := daemon.registerName(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + continue + } + if err := daemon.Register(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + continue + } + } + var wg sync.WaitGroup + var mapLock sync.Mutex + for _, c := range containers { + wg.Add(1) + go func(c *container.Container) { + defer wg.Done() + if c.IsRunning() || c.IsPaused() { + // Fix activityCount such that graph mounts can be unmounted later + if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil { + logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err) + return + } + if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(c.RestartManager(true))); err != nil { + logrus.Errorf("Failed to restore with containerd: %q", err) + return + } + } + // fixme: only if not running + // get list of containers we need to restart + if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestartOnBoot() { + 
mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } + }(c) + } + wg.Wait() + + // Now that all the containers are registered, register the links + for _, c := range containers { + if err := daemon.registerLinks(c, c.HostConfig); err != nil { + logrus.Errorf("failed to register link for container %s: %v", c.ID, err) + } + } + + group := sync.WaitGroup{} + for c, notifier := range restartContainers { + group.Add(1) + + go func(c *container.Container, chNotify chan struct{}) { + defer group.Done() + + logrus.Debugf("Starting container %s", c.ID) + + // ignore errors here as this is a best effort to wait for children to be + // running before we try to start the container + children := daemon.children(c) + timeout := time.After(5 * time.Second) + for _, child := range children { + if notifier, exists := restartContainers[child]; exists { + select { + case <-notifier: + case <-timeout: + } + } + } + if err := daemon.containerStart(c); err != nil { + logrus.Errorf("Failed to start container %s: %s", c.ID, err) + } + close(chNotify) + }(c, notifier) + + } + group.Wait() + + // any containers that were started above would already have had this done, + // however we need to now prepare the mountpoints for the rest of the containers as well. + // This shouldn't cause any issue running on the containers that already had this run. + // This must be run after any containers with a restart policy so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + // if the container has restart policy, do not + // prepare the mountpoints since it has been done on restarting. + // This is to speed up the daemon start when a restart container + // has a volume and the volume dirver is not available. + if _, ok := restartContainers[c]; ok { + continue + } + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + + if !debug { + if logrus.GetLevel() == logrus.InfoLevel { + fmt.Println() + } + logrus.Info("Loading containers: done.") + } + + return nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := merge(config, img.Config); err != nil { + return err + } + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return fmt.Errorf("No command specified") + } + return nil +} + +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateNonCryptoID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(name) { + return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + id, err := daemon.nameIndex.Get(name) + if err != nil { + logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + return "", err + } + return "", fmt.Errorf("Conflict. The name %q is already in use by container %s. 
You have to remove (or rename) that container to be able to reuse that name.", name, id) + } + return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) + } + return name, nil +} + +func (daemon *Daemon) releaseName(name string) { + daemon.nameIndex.Release(name) +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + continue + } + return "", err + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if err := daemon.nameIndex.Reserve(name, id); err != nil { + return "", err + } + return name, nil +} + +func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { + // Generate default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { + if len(configEntrypoint) != 0 { + return configEntrypoint[0], append(configEntrypoint[1:], configCmd...) + } + return configCmd[0], configCmd[1:] +} + +func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID) (*container.Container, error) { + var ( + id string + err error + noExplicitName = name == "" + ) + id, name, err = daemon.generateIDAndName(name) + if err != nil { + return nil, err + } + + daemon.generateHostname(id, config) + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + base := daemon.newBaseContainer(id) + base.Created = time.Now().UTC() + base.Path = entrypoint + base.Args = args //FIXME: de-duplicate from config + base.Config = config + base.HostConfig = &containertypes.HostConfig{} + base.ImageID = imgID + base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} + base.Name = name + base.Driver = daemon.GraphDriverName() + + return base, err +} + +// GetByName returns a container given a name. +func (daemon *Daemon) GetByName(name string) (*container.Container, error) { + if len(name) == 0 { + return nil, fmt.Errorf("No container name supplied") + } + fullName := name + if name[0] != '/' { + fullName = "/" + name + } + id, err := daemon.nameIndex.Get(fullName) + if err != nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(id) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", id) + } + return e, nil +} + +// SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. +func (daemon *Daemon) SubscribeToEvents(since, sinceNano int64, filter filters.Args) ([]eventtypes.Message, chan interface{}) { + ef := events.NewFilter(filter) + return daemon.EventsService.SubscribeTopic(since, sinceNano, ef) +} + +// UnsubscribeFromEvents stops the event subscription for a client by closing the +// channel where the daemon sends events to. 
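`getEntrypointAndArgs` above implements the familiar rule that a configured entrypoint prefixes the command, while an empty entrypoint lets the command supply both path and arguments. A small illustration (values invented):

```go
ep := strslice.StrSlice{"/docker-entrypoint.sh"}
cmd := strslice.StrSlice{"nginx", "-g", "daemon off;"}

path, args := daemon.getEntrypointAndArgs(ep, cmd)
// path == "/docker-entrypoint.sh", args == ["nginx" "-g" "daemon off;"]

path, args = daemon.getEntrypointAndArgs(nil, cmd)
// path == "nginx", args == ["-g" "daemon off;"]
_, _ = path, args
```

`UnsubscribeFromEvents`, below, closes out the stream opened by `SubscribeToEvents`.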
+func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// GetLabels for a container or image id +func (daemon *Daemon) GetLabels(id string) map[string]string { + // TODO: TestCase + container := daemon.containers.Get(id) + if container != nil { + return container.Config.Labels + } + + img, err := daemon.GetImage(id) + if err == nil { + return img.ContainerConfig.Labels + } + return nil +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the names of the parent containers of the container +// with the given name. +func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. +func NewDaemon(config *Config, registryService *registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + setupDumpStackTrap() + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := tempDir(config.Root, rootUID, rootGID) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{configStore: config} + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + // Set the default isolation mode (only applicable on Windows) + if err := 
d.setDefaultIsolation(); err != nil { + return nil, fmt.Errorf("error setting default isolation mode: %v", err) + } + + // Verify logging driver type + if config.LogConfig.Type != "none" { + if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil { + return nil, fmt.Errorf("error finding the logging driver: %v", err) + } + } + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + if err := configureMaxThreads(config); err != nil { + logrus.Warnf("Failed to configure golang's threads limit: %v", err) + } + + installDefaultAppArmorProfile() + daemonRepo := filepath.Join(config.Root, "containers") + if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } + d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: driverName, + GraphDriverOptions: config.GraphOptions, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + return nil, err + } + + graphDriver := d.layerStore.DriverName() + imageRoot := filepath.Join(config.Root, "image", graphDriver) + + // Configure and validate the kernels security support + if err := configureKernelSecuritySupport(config, graphDriver); err != nil { + return nil, err + } + + d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, maxDownloadConcurrency) + d.uploadManager = xfer.NewLayerUploadManager(maxUploadConcurrency) + + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + d.imageStore, err = image.NewImageStore(ifs, d.layerStore) + if err != nil { + return nil, err + } + + // Configure the volumes driver + volStore, err := configureVolumes(config, rootUID, rootGID) + if err != nil { + return nil, err + } + + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := system.MkdirAll(trustDir, 0700); err != nil { + return nil, err + } + + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) + if err != nil { + return nil, err + } + + eventsService := events.New() + + referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + + if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil { + return nil, fmt.Errorf("Couldn't restore custom images: %s", err) + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is hard requirement for container security, + // on Linux/FreeBSD. 
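The storage wiring in `NewDaemon` above follows a fixed precedence: the `DOCKER_DRIVER` environment variable beats `config.GraphDriver`, an empty name lets the layer store auto-select, and every image-metadata path is then keyed by the driver that actually won. Assuming `config.Root` is `/var/lib/docker` and `overlay` is selected, the layout works out as sketched here:

```go
driverName := os.Getenv("DOCKER_DRIVER") // 1) explicit override
if driverName == "" {
	driverName = config.GraphDriver // 2) daemon config; may stay empty
}
// 3) empty: layer.NewStoreFromOptions picks a platform default,
//    reported back by d.layerStore.DriverName().

// Resulting layout (root and driver assumed):
//   layer metadata: /var/lib/docker/image/overlay/layerdb
//   image store:    /var/lib/docker/image/overlay/imagedb
//   references:     /var/lib/docker/image/overlay/repositories.json
//   distribution:   /var/lib/docker/image/overlay/distribution
```

The devices-cgroup check that follows is the last hard requirement before the `Daemon` fields are filled in.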
+ if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled { + return nil, fmt.Errorf("Devices cgroup isn't mounted") + } + + d.ID = trustKey.PublicKey().KeyID() + d.repository = daemonRepo + d.containers = container.NewMemoryStore() + d.execCommands = exec.NewStore() + d.referenceStore = referenceStore + d.distributionMetadataStore = distributionMetadataStore + d.trustKey = trustKey + d.idIndex = truncindex.NewTruncIndex([]string{}) + d.statsCollector = d.newStatsCollector(1 * time.Second) + d.defaultLogConfig = containertypes.LogConfig{ + Type: config.LogConfig.Type, + Config: config.LogConfig.Config, + } + d.RegistryService = registryService + d.EventsService = eventsService + d.volumes = volStore + d.root = config.Root + d.uidMaps = uidMaps + d.gidMaps = gidMaps + d.seccompEnabled = sysInfo.Seccomp + + d.nameIndex = registrar.NewRegistrar() + d.linkIndex = newLinkIndex() + + go d.execCommandGC() + + d.containerd, err = containerdRemote.Client(d) + if err != nil { + return nil, err + } + + if err := d.restore(); err != nil { + return nil, err + } + + return d, nil +} + +func (daemon *Daemon) shutdownContainer(c *container.Container) error { + // TODO(windows): Handle docker restart with paused containers + if c.IsPaused() { + // To terminate a process in freezer cgroup, we should send + // SIGTERM to this process then unfreeze it, and the process will + // force to terminate immediately. + logrus.Debugf("Found container %s is paused, sending SIGTERM before unpause it", c.ID) + sig, ok := signal.SignalMap["TERM"] + if !ok { + return fmt.Errorf("System doesn not support SIGTERM") + } + if err := daemon.kill(c, int(sig)); err != nil { + return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) + } + if err := daemon.containerUnpause(c); err != nil { + return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) + } + if _, err := c.WaitStop(10 * time.Second); err != nil { + logrus.Debugf("container %s failed to exit in 10 second of SIGTERM, sending SIGKILL to force", c.ID) + sig, ok := signal.SignalMap["KILL"] + if !ok { + return fmt.Errorf("System does not support SIGKILL") + } + if err := daemon.kill(c, int(sig)); err != nil { + logrus.Errorf("Failed to SIGKILL container %s", c.ID) + } + c.WaitStop(-1 * time.Second) + return err + } + } + // If container failed to exit in 10 seconds of SIGTERM, then using the force + if err := daemon.containerStop(c, 10); err != nil { + return fmt.Errorf("Stop container %s with error: %v", c.ID, err) + } + + c.WaitStop(-1 * time.Second) + return nil +} + +// Shutdown stops the daemon. +func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true + if daemon.containers != nil { + logrus.Debug("starting clean shutdown of all containers...") + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return + } + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + logrus.Debugf("container stopped %s", c.ID) + }) + } + + if daemon.layerStore != nil { + if err := daemon.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v", err) + } + } + + if err := daemon.cleanupMounts(); err != nil { + return err + } + + return nil +} + +// Mount sets container.BaseFS +// (is it not set coming in? why is it unset?) 
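`shutdownContainer` above encodes a deliberate escalation for paused containers: SIGTERM is sent while the cgroup is still frozen so the signal cannot be lost, the container is unpaused to deliver it, and only after a ten-second grace period does SIGKILL follow, with an unbounded wait afterwards. Condensed into one sketch (the `stopWithGrace` helper is hypothetical; `kill` stands in for `daemon.kill`):

```go
func stopWithGrace(c *container.Container, kill func(sig int) error) error {
	if err := kill(int(syscall.SIGTERM)); err != nil {
		return err
	}
	if _, err := c.WaitStop(10 * time.Second); err != nil {
		// Grace period expired: escalate.
		if err := kill(int(syscall.SIGKILL)); err != nil {
			return err
		}
		c.WaitStop(-1 * time.Second) // negative timeout: wait indefinitely
	}
	return nil
}
```

`Mount`, next, sets `container.BaseFS` from the container's RW layer.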
+func (daemon *Daemon) Mount(container *container.Container) error { + dir, err := container.RWLayer.Mount(container.GetMountLabel()) + if err != nil { + return err + } + logrus.Debugf("container mounted via layerStore: %v", dir) + + if container.BaseFS != dir { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. + if container.BaseFS != "" && runtime.GOOS != "windows" { + daemon.Unmount(container) + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.GraphDriverName(), container.ID, container.BaseFS, dir) + } + } + container.BaseFS = dir // TODO: combine these fields + return nil +} + +// Unmount unsets the container base filesystem +func (daemon *Daemon) Unmount(container *container.Container) error { + if err := container.RWLayer.Unmount(); err != nil { + logrus.Errorf("Error unmounting container %s: %s", container.ID, err) + return err + } + return nil +} + +func (daemon *Daemon) kill(c *container.Container, sig int) error { + return daemon.containerd.Signal(c.ID, sig) +} + +func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { + return daemon.statsCollector.collect(c) +} + +func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { + daemon.statsCollector.unsubscribe(c, ch) +} + +func (daemon *Daemon) changes(container *container.Container) ([]archive.Change, error) { + return container.RWLayer.Changes() +} + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). +func (daemon *Daemon) TagImage(newTag reference.Named, imageName string) error { + imageID, err := daemon.GetImageID(imageName) + if err != nil { + return err + } + if err := daemon.referenceStore.AddTag(newTag, imageID, true); err != nil { + return err + } + + daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") + return nil +} + +func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. + } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. 
+ progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + writeDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePullConfig := &distribution.ImagePullConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: daemon.imageStore, + ReferenceStore: daemon.referenceStore, + DownloadManager: daemon.downloadManager, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// PullOnBuild tells Docker to pull image referenced by `name`. +func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + ref = reference.WithDefaultTag(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config file, we prefer to use that + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig( + authConfigs, + repoInfo.Index, + ) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.PullImage(ctx, ref, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore) + return imageExporter.Save(names, outStream) +} + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + writeDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePushConfig := &distribution.ImagePushConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + LayerStore: daemon.layerStore, + ImageStore: daemon.imageStore, + ReferenceStore: daemon.referenceStore, + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err := distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. 
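`PullImage` and `PushImage` above share one concurrency pattern worth spelling out: a buffered progress channel insulates the transfer from a slow client, the writer goroutine cancels the operation on a failed write but keeps draining so senders never block, and the caller closes the channel and waits for the writer before returning. The same ordering, restated with the invariants as comments:

```go
progressChan := make(chan progress.Progress, 100) // buffer absorbs slow clients
writesDone := make(chan struct{})
ctx, cancel := context.WithCancel(ctx)

go func() {
	// On a write error this cancels ctx but keeps draining progressChan,
	// so the goroutines sending progress can never block forever.
	writeDistributionProgress(cancel, outStream, progressChan)
	close(writesDone)
}()

err := distribution.Pull(ctx, ref, imagePullConfig)
close(progressChan) // no more sends: lets the writer loop terminate
<-writesDone        // don't return while the writer may still touch outStream
return err
```

`LookupImage`, below, is the inspect-side counterpart, assembling an `ImageInspect` from the same stores.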
+func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + refs := daemon.referenceStore.References(img.ID()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, ref.String()) + case reference.Canonical: + repoDigests = append(repoDigests, ref.String()) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore) + return imageExporter.Load(inTar, outStream, quiet) +} + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. +func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*types.ImageHistory{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*types.ImageHistory{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) 
+ } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.referenceStore.References(id) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, r.String()) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + + return history, nil +} + +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. +func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + id, ref, err := reference.ParseIDOrReference(refOrID) + if err != nil { + return "", err + } + if id != "" { + if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { + return "", ErrImageDoesNotExist{refOrID} + } + return image.ID(id), nil + } + + if id, err := daemon.referenceStore.Get(ref); err == nil { + return id, nil + } + if tagged, ok := ref.(reference.NamedTagged); ok { + if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { + for _, namedRef := range daemon.referenceStore.References(id) { + if namedRef.Name() == ref.Name() { + return id, nil + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{refOrID} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. +func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) +} + +// GetImageOnBuild looks up a Docker image referenced by `name`. +func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + return img, nil +} + +// GraphDriverName returns the name of the graph driver used by the layer.Store +func (daemon *Daemon) GraphDriverName() string { + return daemon.layerStore.DriverName() +} + +// GetUIDGIDMaps returns the current daemon's user namespace settings +// for the full uid and gid maps which will be applied to containers +// started in this instance. +func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { + return daemon.uidMaps, daemon.gidMaps +} + +// GetRemappedUIDGID returns the current daemon's uid and gid values +// if user namespaces are in use for this daemon instance. If not +// this function will return "real" root values of 0, 0. +func (daemon *Daemon) GetRemappedUIDGID() (int, int) { + uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + return uid, gid +} + +// GetCachedImage returns the most recent created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. 
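`GetImageID` above settles a `refOrID` in a fixed order: a string that parses as a literal ID hits the image store directly, a named reference goes through the reference store, a tagged name falls back to a tag search cross-checked against the repository name, and only then is a bare prefix search attempted. Calls that exercise each branch (all values invented):

```go
daemon.GetImageID("sha256:4f2d9c1a...") // literal ID/digest: image store, direct
daemon.GetImageID("busybox:latest")     // named reference: reference store
daemon.GetImageID("4f2d")               // bare prefix: imageStore.Search
// Anything still unresolved comes back as ErrImageDoesNotExist{refOrID}.
```

`GetCachedImage`, next, builds on this resolution to serve the build cache.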
+func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { + // Loop on the children of the given image and check the config + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := daemon.imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if runconfig.Compare(&img.ContainerConfig, config) { + // check for the most up to date match + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil + } + + // In this case, this is `FROM scratch`, which isn't an actual image. + if imgID == "" { + images := daemon.imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } + } + return getMatch(siblings) + } + + // find match from child images + siblings := daemon.imageStore.Children(imgID) + return getMatch(siblings) +} + +// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` +// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. +func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) { + cache, err := daemon.GetCachedImage(image.ID(imgID), cfg) + if cache == nil || err != nil { + return "", err + } + return cache.ID().String(), nil +} + +// tempDir returns the default directory to use for temporary files. +func tempDir(rootDir string, rootUID, rootGID int) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + // make sure links is not nil + // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links + if hostConfig.Links == nil { + hostConfig.Links = []string{} + } + + container.HostConfig = hostConfig + return container.ToDisk() +} + +func (daemon *Daemon) setupInitLayer(initPath string) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + return setupInitLayer(initPath, rootUID, rootGID) +} + +func setDefaultMtu(config *Config) { + // do nothing if the config does not have the default 0 value. + if config.Mtu != 0 { + return + } + config.Mtu = defaultNetworkMtu +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. 
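`GetCachedImage` above is the builder's cache probe: among the children of `imgID` whose recorded `ContainerConfig` compares equal (via `runconfig.Compare`) to the requested config, the most recently created one wins, and an empty `imgID` (`FROM scratch`) means candidates are found by scanning the whole image map for parentless images. The selection rule in isolation, over already-filtered candidates (`newestMatch` is a hypothetical rename, not the vendored code):

```go
// newestMatch mirrors the match-selection loop: latest Created wins.
func newestMatch(candidates []*image.Image) *image.Image {
	var match *image.Image
	for _, img := range candidates {
		if match == nil || match.Created.Before(img.Created) {
			match = img
		}
	}
	return match
}
```

`verifyContainerSettings`, below, is where create and update requests are vetted before any of this machinery runs.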
+func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + } + + if hostConfig == nil { + return nil, nil + } + + logCfg := daemon.getLogConfig(hostConfig.LogConfig) + if err := logger.ValidateLogOpts(logCfg.Type, logCfg.Config); err != nil { + return nil, err + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("Invalid port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort) + } + } + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} + +// Checks if the client set configurations for more than one network while creating a container +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) <= 1 { + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return errors.NewBadRequestError(err) +} + +func configureVolumes(config *Config, rootUID, rootGID int) (*store.VolumeStore, error) { + volumesDriver, err := local.New(config.Root, rootUID, rootGID) + if err != nil { + return nil, err + } + + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + return store.New(config.Root) +} + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(authConfig, dockerversion.DockerUserAgent(ctx)) +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login. +func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, term string, + authConfig *types.AuthConfig, + headers map[string][]string) (*registrytypes.SearchResults, error) { + return daemon.RegistryService.Search(term, authConfig, dockerversion.DockerUserAgent(ctx), headers) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + return stats, nil +} + +// newBaseContainer creates a new container with its initial +// configuration based on the root storage from the daemon. 
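The port loop in `verifyContainerSettings` above validates both halves of a binding: the container-side key is split with `nat.SplitProtoPort` and its numeric part parsed, and each host-side `HostPort` must itself survive `nat.NewPort`. Roughly what passes and what fails (illustrative values):

```go
proto, port := nat.SplitProtoPort("8080/udp") // -> "udp", "8080"
if _, err := nat.ParsePort(port); err != nil {
	// Not reached for "8080"; a key like "http/tcp" would fail here.
}
if _, err := nat.NewPort(nat.SplitProtoPort("80")); err != nil {
	// Not reached for "80" (proto defaults to tcp); "abc" would be rejected.
}
_ = proto
```

`newBaseContainer`, next, is the constructor that validated create requests ultimately feed.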
+func (daemon *Daemon) newBaseContainer(id string) *container.Container { + return container.NewBaseContainer(id, daemon.containerRoot(id)) +} + +// Reload reads configuration changes and modifies the +// daemon according to those changes. +// This are the settings that Reload changes: +// - Daemon labels. +// - Cluster discovery (reconfigure and restart). +func (daemon *Daemon) Reload(config *Config) error { + daemon.configStore.reloadLock.Lock() + defer daemon.configStore.reloadLock.Unlock() + if config.IsValueSet("labels") { + daemon.configStore.Labels = config.Labels + } + if config.IsValueSet("debug") { + daemon.configStore.Debug = config.Debug + } + return nil +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} + +func isBridgeNetworkDisabled(config *Config) bool { + return config.bridgeConfig.Iface == disableNetworkBridge +} + +func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_experimental.go b/vendor/github.com/docker/docker/daemon/daemon_experimental.go new file mode 100644 index 00000000..3fd0e765 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_experimental.go @@ -0,0 +1,9 @@ +// +build experimental + +package daemon + +import "github.com/docker/engine-api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux.go b/vendor/github.com/docker/docker/daemon/daemon_linux.go new file mode 100644 index 00000000..9bdf6e2b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_linux.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +func (daemon *Daemon) cleanupMountsByID(id string) error { + logrus.Debugf("Cleaning up old mountid %s: start.", id) + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { + if daemon.root == "" { + return nil + } + var errors []string + + regexps := getCleanPatterns(id) + sc := bufio.NewScanner(reader) + for sc.Scan() { + if fields := strings.Fields(sc.Text()); len(fields) >= 4 { + if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { + for _, p := range regexps { + if p.MatchString(mnt) { + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old mountid %v: done.", id) + return nil +} + +// cleanupMounts umounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + return daemon.cleanupMountsByID("") +} + +func getCleanPatterns(id string) (regexps []*regexp.Regexp) { + var patterns []string + if id == "" { + id = "[0-9a-f]{64}" 
+ patterns = append(patterns, "containers/"+id+"/shm") + } + patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") + for _, p := range patterns { + r, err := regexp.Compile(p) + if err == nil { + regexps = append(regexps, r) + } + } + return +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_stub.go b/vendor/github.com/docker/docker/daemon/daemon_stub.go new file mode 100644 index 00000000..40e8ddc8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_stub.go @@ -0,0 +1,9 @@ +// +build !experimental + +package daemon + +import "github.com/docker/engine-api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix.go b/vendor/github.com/docker/docker/daemon/daemon_unix.go new file mode 100644 index 00000000..1e2e5ceb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unix.go @@ -0,0 +1,955 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + pblkiodev "github.com/docker/engine-api/types/blkiodev" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/opencontainers/specs/specs-go" +) + +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true + // It's not a kernel limit; we want this 4M minimum so that a container stays reasonably functional + linuxMinMemory = 4194304 + // constants for remapped root settings + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" + + // constants for cgroup drivers + cgroupFsDriver = "cgroupfs" + cgroupSystemdDriver = "systemd" +) + +func getMemoryResources(config containertypes.Resources) *specs.Memory { + memory := specs.Memory{} + + if config.Memory > 0 { + limit := uint64(config.Memory) + memory.Limit = &limit + } + + if config.MemoryReservation > 0 { + reservation := uint64(config.MemoryReservation) + memory.Reservation = &reservation + } + + if config.MemorySwap != 0 { + swap := uint64(config.MemorySwap) + memory.Swap = &swap + } + + if config.MemorySwappiness != nil { + swappiness := uint64(*config.MemorySwappiness) + memory.Swappiness = &swappiness + } + + if config.KernelMemory != 0 { + kernelMemory := uint64(config.KernelMemory) + memory.Kernel = &kernelMemory + } + + return &memory +} + +func getCPUResources(config containertypes.Resources) *specs.CPU { + cpu := specs.CPU{} + + if config.CPUShares != 0 { + shares := uint64(config.CPUShares) + cpu.Shares = &shares + } + + if config.CpusetCpus != "" { + cpuset := config.CpusetCpus + cpu.Cpus = &cpuset + } + + if 
config.CpusetMems != "" { + cpuset := config.CpusetMems + cpu.Mems = &cpuset + } + + if config.CPUPeriod != 0 { + period := uint64(config.CPUPeriod) + cpu.Period = &period + } + + if config.CPUQuota != 0 { + quota := uint64(config.CPUQuota) + cpu.Quota = "a + } + + return &cpu +} + +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { + var stat syscall.Stat_t + var blkioWeightDevices []specs.WeightDevice + + for _, weightDevice := range config.BlkioWeightDevice { + if err := syscall.Stat(weightDevice.Path, &stat); err != nil { + return nil, err + } + weight := weightDevice.Weight + d := specs.WeightDevice{Weight: &weight} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWeightDevices = append(blkioWeightDevices, d) + } + + return blkioWeightDevices, nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + if opt == "no-new-privileges" { + container.NoNewPrivileges = true + } else { + var con []string + if strings.Contains(opt, "=") { + con = strings.SplitN(opt, "=", 2) + } else if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + logrus.Warnf("Security options with `:` as a separator are deprecated and will be completely unsupported in 1.13, use `=` instead.") + } + + if len(con) != 2 { + return fmt.Errorf("Invalid --security-opt 1: %q", opt) + } + + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + case "seccomp": + container.SeccompProfile = con[1] + default: + return fmt.Errorf("Invalid --security-opt 2: %q", opt) + } + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func getBlkioReadIOpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioReadIOpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, iopsDevice := range config.BlkioDeviceReadIOps { + if err := syscall.Stat(iopsDevice.Path, &stat); err != nil { + return nil, err + } + rate := iopsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioReadIOpsDevice = append(blkioReadIOpsDevice, d) + } + + return blkioReadIOpsDevice, nil +} + +func getBlkioWriteIOpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioWriteIOpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, iopsDevice := range config.BlkioDeviceWriteIOps { + if err := syscall.Stat(iopsDevice.Path, &stat); err != nil { + return nil, err + } + rate := iopsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWriteIOpsDevice = append(blkioWriteIOpsDevice, d) + } + + return blkioWriteIOpsDevice, nil +} + +func getBlkioReadBpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioReadBpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, bpsDevice := range config.BlkioDeviceReadBps { + if err := syscall.Stat(bpsDevice.Path, &stat); err != nil { + return nil, err + } + rate := bpsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioReadBpsDevice = append(blkioReadBpsDevice, d) + } + + return blkioReadBpsDevice, nil +} + +func 
getBlkioWriteBpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioWriteBpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, bpsDevice := range config.BlkioDeviceWriteBps { + if err := syscall.Stat(bpsDevice.Path, &stat); err != nil { + return nil, err + } + rate := bpsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWriteBpsDevice = append(blkioWriteBpsDevice, d) + } + + return blkioWriteBpsDevice, nil +} + +func checkKernelVersion(k, major, minor int) bool { + if v, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("%s", err) + } else { + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + if !checkKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. 
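+ // (illustrative: a container started with -m 512m and no explicit
+ // --memory-swap therefore gets a combined memory+swap limit of 1g,
+ // since MemorySwap holds the total of memory plus swap)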
+ hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + var err error + if hostConfig.SecurityOpt == nil { + hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) + if err != nil { + return err + } + } + if hostConfig.MemorySwappiness == nil { + defaultSwappiness := int64(-1) + hostConfig.MemorySwappiness = &defaultSwappiness + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") + logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100.", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support kernel memory limit capabilities. 
Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable, OomKillDisable discarded.") + logrus.Warnf("Your kernel does not support OomKillDisable, OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities, pids limit discarded.") + logrus.Warnf("Your kernel does not support pids limit capabilities, pids limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") + logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") + logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") + resources.CPUQuota = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. 
Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.") + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device.") + logrus.Warnf("Your kernel does not support Block I/O weight_device. Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support Block read limit in bytes per second.") + logrus.Warnf("Your kernel does not support Block I/O read limit in bytes per second. --device-read-bps discarded.") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support Block write limit in bytes per second.") + logrus.Warnf("Your kernel does not support Block I/O write limit in bytes per second. --device-write-bps discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support Block read limit in IO per second.") + logrus.Warnf("Your kernel does not support Block I/O read limit in IO per second. -device-read-iops discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support Block write limit in IO per second.") + logrus.Warnf("Your kernel does not support Block I/O write limit in IO per second. --device-write-iops discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. 
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + if err != nil { + return warnings, err + } + warnings = append(warnings, w...) + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size must be greater than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000].", hostConfig.OomScoreAdj) + } + if sysInfo.IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + if hostConfig.ReadonlyRootfs { + return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + return warnings, nil +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + // Check for mutually incompatible config options + if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function.
Please set --icc or --iptables to true") + } + if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { + config.bridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(config); err != nil { + return err + } + if config.CgroupParent != "" && UsingSystemd(config) { + if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +// configureKernelSecuritySupport configures and validates security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + if config.EnableSelinuxSupport { + if selinuxEnabled() { + // As Docker on overlayFS and SELinux are incompatible at present, error on overlayfs being enabled + if driverName == "overlay" { + return fmt.Errorf("SELinux is not supported with the %s graph driver", driverName) + } + logrus.Debug("SELinux enabled successfully") + } else { + logrus.Warn("Docker could not enable SELinux on the host system") + } + } else { + selinuxSetDisabled() + } + return nil +} + +// setupInitLayer populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func setupInitLayer(initLayer string, rootUID, rootGID int) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootUID, rootGID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before.
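+ // (Note the special case above: "/etc/mtab" maps to "/proc/mounts",
+ // so it is created as a symlink to /proc/mounts rather than as an
+ // empty file.)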
+ return nil +} + +// Parse the remapped root (user namespace) option, which can be one of: +// username - valid username from /etc/passwd +// username:groupname - valid username; valid groupname from /etc/group +// uid - 32-bit unsigned int valid Linux UID value +// uid:gid - uid value; 32-bit unsigned int Linux GID value +// +// If no groupname is specified, and a username is specified, an attempt +// will be made to look up a gid for that username as a groupname +// +// If names are used, they are verified to exist in passwd/group +func parseRemappedRoot(usergrp string) (string, string, error) { + + var ( + userID, groupID int + username, groupname string + ) + + idparts := strings.Split(usergrp, ":") + if len(idparts) > 2 { + return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) + } + + if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { + // must be a uid; take it as valid + userID = int(uid) + luser, err := user.LookupUid(userID) + if err != nil { + return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) + } + username = luser.Name + if len(idparts) == 1 { + // if the uid was numeric and no gid was specified, take the uid as the gid + groupID = userID + lgrp, err := user.LookupGid(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } + } else { + lookupName := idparts[0] + // special case: if the user specified "default", they want Docker to create or + // use (after creation) the "dockremap" user/group for root remapping + if lookupName == defaultIDSpecifier { + lookupName = defaultRemappedID + } + luser, err := user.LookupUser(lookupName) + if err != nil && idparts[0] != defaultIDSpecifier { + // error if the name requested isn't the special "dockremap" ID + return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) + } else if err != nil { + // special case-- if the username == "default", then we have been asked + // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} + // ranges will be used for the user and group mappings in user namespaced containers + _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) + if err == nil { + return defaultRemappedID, defaultRemappedID, nil + } + return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) + } + username = luser.Name + if len(idparts) == 1 { + // we only have a string username, and no group specified; look up gid from username as group + group, err := user.LookupGroup(lookupName) + if err != nil { + return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) + } + groupID = group.Gid + groupname = group.Name + } + } + + if len(idparts) == 2 { + // groupname or gid is separately specified and must be resolved + // to an unsigned 32-bit gid + if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { + // must be a gid, take it as valid + groupID = int(gid) + lgrp, err := user.LookupGid(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } else { + // not a number; attempt a lookup + if _, err := user.LookupGroup(idparts[1]); err != nil { + return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) + } + groupname = idparts[1] + } + } + return username, groupname, nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap,
[]idtools.IDMap, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root option, parse + // the config option to the int uid,gid values + var ( + uidMaps, gidMaps []idtools.IDMap + ) + if config.RemappedRoot != "" { + username, groupname, err := parseRemappedRoot(config.RemappedRoot) + if err != nil { + return nil, nil, err + } + if username == "root" { + // Cannot set up user namespaces with a 1-to-1 mapping; "--root=0:0" is effectively + // a no-op + logrus.Warnf("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + return uidMaps, gidMaps, nil + } + logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) + // update remapped root setting now that we have resolved them to actual names + config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) + + uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + if err != nil { + return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + } + } + return uidMaps, gidMaps, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) + // so that syscalls executing as non-root, operating on subdirectories of the graph root + // (e.g. mounted layers of a container) can traverse this path. + // The user namespace support will create subdirectories for the remapped root host uid:gid + // pair owned by that same uid:gid pair for proper write access to those needed metadata and + // layer content subtrees. + if _, err := os.Stat(rootDir); err == nil { + // root currently exists; verify the access bits are correct by setting them + if err = os.Chmod(rootDir, 0711); err != nil { + return err + } + } else if os.IsNotExist(err) { + // no root exists yet, create it 0711 with root:root ownership + if err := os.MkdirAll(rootDir, 0711); err != nil { + return err + } + } + + // if user namespaces are enabled we will create a subtree underneath the specified root + // with any/all specified remapped root uid/gid options on the daemon creating + // a new subdirectory with ownership set to the remapped uid/gid (so as to allow + // `chdir()` to work for containers namespaced to that uid/gid) + if config.RemappedRoot != "" { + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) + // Create the root directory if it doesn't exist + if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) + } + } + return nil +} + +// registerLinks writes the links to a file.
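+// It resolves each legacy link against the daemon's container index,
+// follows container:<id> network-mode chains to the final target, and
+// rejects links to containers that share the host network namespace.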
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := runconfigopts.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.GetContainer(name) + if err != nil { + //An error from daemon.GetContainer() means this name could not be found + return fmt.Errorf("Could not get container for %s", name) + } + for child.HostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) + child, err = daemon.GetContainer(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.HostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.registerLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + return container.WriteHostConfig() +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { + // Unix has no custom images to register + return nil +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + s := &types.StatsJSON{} + cgs := stats.CgroupStats + if cgs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), + } + cpu := cgs.CpuStats + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cgs.MemoryStats.Usage + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage, + MaxUsage: mem.MaxUsage, + Stats: cgs.MemoryStats.Stats, + Failcnt: mem.Failcnt, + Limit: mem.Limit, + } + // if the container does not set memory limit, use the machineMemory + if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { + s.MemoryStats.Limit = 
daemon.statsCollector.machineMemory + } + if cgs.PidsStats != nil { + s.PidsStats = types.PidsStats{ + Current: cgs.PidsStats.Current, + } + } + } + s.Read = time.Unix(int64(stats.Timestamp), 0) + return s, nil +} + +// setDefaultIsolation determines the default isolation mode for the +// daemon to run in. This is only applicable on Windows. +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go new file mode 100644 index 00000000..987528f4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows + +package daemon + +const platformSupported = false diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go new file mode 100644 index 00000000..c4a11b07 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + psignal "github.com/docker/docker/pkg/signal" +) + +func setupDumpStackTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + psignal.DumpStacks() + } + }() +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go new file mode 100644 index 00000000..fef1bd77 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows + +package daemon + +func setupDumpStackTrap() { + return +} diff --git a/vendor/github.com/docker/docker/daemon/delete.go b/vendor/github.com/docker/docker/daemon/delete.go new file mode 100644 index 00000000..ec9d5c5f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/delete.go @@ -0,0 +1,157 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/errors" + "github.com/docker/docker/layer" + volumestore "github.com/docker/docker/volume/store" + "github.com/docker/engine-api/types" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. +func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races.
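+ // A concurrent removal of the same container simply returns nil here,
+ // since the first caller is already tearing the container down.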
+ if inProgress := container.SetRemovalInProgress(); inProgress { + return nil + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove) + if err == nil || config.ForceRemove { + if e := daemon.removeMountPoints(container, config.RemoveVolume); e != nil { + logrus.Error(e) + } + } + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.nameIndex.Get(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. +func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) { + if container.IsRunning() { + if !forceRemove { + err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) + return errors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless of + // whether stats are currently being collected. + daemon.statsCollector.stopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.SetDead() + + // Save container state to disk so that if an error happens before the + // container meta file is removed from disk, a restart of + // docker does not bring a dead container back to life. + if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + + // If force removal is required, delete container from various + // indexes even if removal failed.
+ defer func() { + if err == nil || forceRemove { + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + daemon.LogContainerEvent(container, "destroy") + } + }() + + if err = os.RemoveAll(container.Root); err != nil { + return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + } + + // When container creation fails and `RWLayer` has not been created yet, we + // do not call `ReleaseRWLayer` + if container.RWLayer != nil { + metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist { + return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) + } + } + + return nil +} + +// VolumeRm removes the volume with the given name. +// If the volume is referenced by a container it is not removed +// This is called directly from the remote API +func (daemon *Daemon) VolumeRm(name string) error { + v, err := daemon.volumes.Get(name) + if err != nil { + return err + } + + if err := daemon.volumes.Remove(v); err != nil { + if volumestore.IsInUse(err) { + err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err) + return errors.NewRequestConflictError(err) + } + return fmt.Errorf("Error while removing volume %s: %v", name, err) + } + daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/errors.go b/vendor/github.com/docker/docker/daemon/errors.go new file mode 100644 index 00000000..131c9a1e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/errors.go @@ -0,0 +1,57 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/errors" + "github.com/docker/docker/reference" +) + +func (d *Daemon) imageNotExistToErrcode(err error) error { + if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { + if strings.Contains(dne.RefOrID, "@") { + e := fmt.Errorf("No such image: %s", dne.RefOrID) + return errors.NewRequestNotFoundError(e) + } + tag := reference.DefaultTag + ref, err := reference.ParseNamed(dne.RefOrID) + if err != nil { + e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) + return errors.NewRequestNotFoundError(e) + } + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) + return errors.NewRequestNotFoundError(e) + } + return err +} + +type errNotRunning struct { + containerID string +} + +func (e errNotRunning) Error() string { + return fmt.Sprintf("Container %s is not running", e.containerID) +} + +func (e errNotRunning) ContainerIsRunning() bool { + return false +} + +func errContainerIsRestarting(containerID string) error { + err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) + return errors.NewRequestConflictError(err) +} + +func errExecNotFound(id string) error { + err := fmt.Errorf("No such exec instance '%s' found in daemon", id) + return errors.NewRequestNotFoundError(err) +} + +func errExecPaused(id string) error { + err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/docker/docker/daemon/events.go b/vendor/github.com/docker/docker/daemon/events.go new file mode 100644 
index 00000000..7ffffbb6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events.go @@ -0,0 +1,71 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/container" + "github.com/docker/engine-api/types/events" +) + +// LogContainerEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to an image with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. +func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} diff --git a/vendor/github.com/docker/docker/daemon/events/events.go b/vendor/github.com/docker/docker/daemon/events/events.go new file mode 100644 index 00000000..ac1c98cd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/events.go @@ -0,0 +1,142 @@ +package events + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/pubsub" + eventtypes "github.com/docker/engine-api/types/events" +) + +const ( + eventsLimit = 64 + bufferSize = 1024 +) + +// Events is a pubsub channel for events generated by the engine.
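+// It keeps the last eventsLimit (64) messages in memory so that new
+// subscribers can catch up on recent history. A usage sketch
+// (illustrative, not part of the vendored source):
+//
+//	evts := events.New()
+//	past, ch, cancel := evts.Subscribe()
+//	defer cancel()
+//	for _, m := range past {
+//		fmt.Println(m.Action) // replay the buffered history first
+//	}
+//	go func() {
+//		for e := range ch {
+//			m := e.(eventtypes.Message) // channel carries interface{}
+//			fmt.Println(m.Action)
+//		}
+//	}()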
+type Events struct { + mu sync.Mutex + events []eventtypes.Message + pub *pubsub.Publisher +} + +// New returns a new *Events instance +func New() *Events { + return &Events{ + events: make([]eventtypes.Message, 0, eventsLimit), + pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), + } +} + +// Subscribe adds a new listener to events and returns a slice of the last +// 64 stored events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion), and a function to call +// to stop the stream of events. +func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { + e.mu.Lock() + current := make([]eventtypes.Message, len(e.events)) + copy(current, e.events) + l := e.pub.Subscribe() + e.mu.Unlock() + + cancel := func() { + e.Evict(l) + } + return current, l, cancel +} + +// SubscribeTopic adds a new listener to events and returns a slice of the last +// 64 stored events, and a channel in which you can expect new events (in form +// of interface{}, so you need type assertion). +func (e *Events) SubscribeTopic(since, sinceNano int64, ef *Filter) ([]eventtypes.Message, chan interface{}) { + e.mu.Lock() + + var topic func(m interface{}) bool + if ef != nil && ef.filter.Len() > 0 { + topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } + } + + buffered := e.loadBufferedEvents(since, sinceNano, topic) + + var ch chan interface{} + if topic != nil { + ch = e.pub.SubscribeTopic(topic) + } else { + // Subscribe to all events if there are no filters + ch = e.pub.Subscribe() + } + + e.mu.Unlock() + return buffered, ch +} + +// Evict evicts a listener from the pubsub +func (e *Events) Evict(l chan interface{}) { + e.pub.Evict(l) +} + +// Log broadcasts an event to listeners. Each listener has 100 milliseconds to +// receive the event or it will be skipped. +func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { + now := time.Now().UTC() + jm := eventtypes.Message{ + Action: action, + Type: eventType, + Actor: actor, + Time: now.Unix(), + TimeNano: now.UnixNano(), + } + + // fill deprecated fields for container and images + switch eventType { + case eventtypes.ContainerEventType: + jm.ID = actor.ID + jm.Status = action + jm.From = actor.Attributes["image"] + case eventtypes.ImageEventType: + jm.ID = actor.ID + jm.Status = action + } + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns the number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted at or after a specific date. +// The date is split into two values: +// - the `since` argument is a date timestamp without nanoseconds, or -1 to return an empty slice. +// - the `sinceNano` argument is the nanoseconds offset from the timestamp. +// It uses `time.Unix(seconds, nanoseconds)` to generate a valid date with those first two arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages.
+func (e *Events) loadBufferedEvents(since, sinceNano int64, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since == -1 { + return buffered + } + + sinceNanoUnix := time.Unix(since, sinceNano).UnixNano() + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + if ev.TimeNano < sinceNanoUnix { + break + } + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...) + } + } + return buffered +} diff --git a/vendor/github.com/docker/docker/daemon/events/filter.go b/vendor/github.com/docker/docker/daemon/events/filter.go new file mode 100644 index 00000000..8936e371 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/filter.go @@ -0,0 +1,82 @@ +package events + +import ( + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// Filter can filter out docker events from a stream +type Filter struct { + filter filters.Args +} + +// NewFilter creates a new Filter +func NewFilter(filter filters.Args) *Filter { + return &Filter{filter: filter} +} + +// Include returns true when the event ev is included by the filters +func (ef *Filter) Include(ev events.Message) bool { + return ef.filter.ExactMatch("event", ev.Action) && + ef.filter.ExactMatch("type", ev.Type) && + ef.matchContainer(ev) && + ef.matchVolume(ev) && + ef.matchNetwork(ev) && + ef.matchImage(ev) && + ef.matchLabels(ev.Actor.Attributes) +} + +func (ef *Filter) matchLabels(attributes map[string]string) bool { + if !ef.filter.Include("label") { + return true + } + return ef.filter.MatchKVList("label", attributes) +} + +func (ef *Filter) matchContainer(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ContainerEventType) +} + +func (ef *Filter) matchVolume(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.VolumeEventType) +} + +func (ef *Filter) matchNetwork(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NetworkEventType) +} + +func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { + return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || + ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) +} + +// matchImage matches against both event.Actor.ID (for image events) +// and event.Actor.Attributes["image"] (for container events), so that any container that was created +// from an image will be included in the image events. Also compare both +// against the stripped repo name without any tags. 
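+// (illustrative: a filter of image=redis also matches an event whose
+// image attribute is "redis:3.2", because stripTag reduces that name
+// to "redis")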
+func (ef *Filter) matchImage(ev events.Message) bool { + id := ev.Actor.ID + nameAttr := "image" + var imageName string + + if ev.Type == events.ImageEventType { + nameAttr = "name" + } + + if n, ok := ev.Actor.Attributes[nameAttr]; ok { + imageName = n + } + return ef.filter.ExactMatch("image", id) || + ef.filter.ExactMatch("image", imageName) || + ef.filter.ExactMatch("image", stripTag(id)) || + ef.filter.ExactMatch("image", stripTag(imageName)) +} + +func stripTag(image string) string { + ref, err := reference.ParseNamed(image) + if err != nil { + return image + } + return ref.Name() +} diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go new file mode 100644 index 00000000..be06845c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec.go @@ -0,0 +1,246 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/errors" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/strslice" +) + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in the container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in the daemon for easy access via remote API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool indicating whether it exists. +// It will also return the error produced by `getExecConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs.
+ + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. +func (d *Daemon) ContainerExecCreate(config *types.ExecConfig) (string, error) { + container, err := d.getActiveContainer(config.Container) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + logrus.Warnf("Wrong escape keys provided (%s, error: %s) using default : ctrl-p ctrl-q", config.DetachKeys, err.Error()) + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = container.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + if len(execConfig.User) == 0 { + execConfig.User = container.Config.User + } + + d.registerExecCommand(container, execConfig) + + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. 
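+// On failure, the deferred handler below marks the exec as not running
+// and forces its exit code to 126. A call sketch (illustrative, using
+// the APIs defined in this file):
+//
+//	id, err := d.ContainerExecCreate(&types.ExecConfig{
+//		Container:    "mycontainer",
+//		Cmd:          []string{"ls", "/"},
+//		AttachStdout: true,
+//	})
+//	if err == nil {
+//		err = d.ContainerExecStart(id, nil, os.Stdout, os.Stderr)
+//	}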
+func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + defer func() { + if err != nil { + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + } + }() + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.NewInputPipes() + } else { + ec.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return err + } + + attachErr := container.AttachStreams(context.Background(), ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) + + if err := d.containerd.AddProcess(c.ID, name, p); err != nil { + return err + } + + err = <-attachErr + if err != nil { + return fmt.Errorf("attach failed with error: %v", err) + } + return nil +} + +// execCommandGC runs a ticker to clean up the daemon references +// of exec configs that are no longer part of the container. +func (d *Daemon) execCommandGC() { + for range time.Tick(5 * time.Minute) { + var ( + cleaned int + liveExecCommands = d.containerExecIds() + ) + for id, config := range d.execCommands.Commands() { + if config.CanRemove { + cleaned++ + d.execCommands.Delete(id) + } else { + if _, exists := liveExecCommands[id]; !exists { + config.CanRemove = true + } + } + } + if cleaned > 0 { + logrus.Debugf("clean %d unused exec commands", cleaned) + } + } +} + +// containerExecIds returns a list of all the current exec ids that are in use +// and running inside a container. +func (d *Daemon) containerExecIds() map[string]struct{} { + ids := map[string]struct{}{} + for _, c := range d.containers.List() { + for _, id := range c.ExecCommands.List() { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/vendor/github.com/docker/docker/daemon/exec/exec.go b/vendor/github.com/docker/docker/daemon/exec/exec.go new file mode 100644 index 00000000..bbeb1c16 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec/exec.go @@ -0,0 +1,93 @@ +package exec + +import ( + "sync" + + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" +) + +// Config holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion.
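A note on the execCommandGC loop above: it is effectively a two-pass mark-and-sweep. A config absent from the live set is only marked CanRemove on one pass and deleted on a later one, which gives an in-flight exec start a full five-minute interval to pick it up. A self-contained sketch of one pass, with illustrative names only:

```go
package main

import "fmt"

// cmd stands in for exec.Config; only the CanRemove flag matters here.
type cmd struct{ canRemove bool }

// gcPass marks commands missing from the live set, and sweeps only those
// already marked on an earlier pass. It returns how many were deleted.
func gcPass(cmds map[string]*cmd, live map[string]struct{}) int {
	cleaned := 0
	for id, c := range cmds {
		if c.canRemove {
			delete(cmds, id)
			cleaned++
		} else if _, ok := live[id]; !ok {
			c.canRemove = true // mark now, sweep on the next pass
		}
	}
	return cleaned
}

func main() {
	cmds := map[string]*cmd{"a": {}, "b": {}}
	live := map[string]struct{}{"a": {}}
	fmt.Println(gcPass(cmds, live)) // 0: "b" is only marked
	fmt.Println(gcPass(cmds, live)) // 1: "b" is swept
}
```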
+type Config struct { + sync.Mutex + *runconfig.StreamConfig + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string +} + +// NewConfig initializes a new exec configuration. +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: runconfig.NewStreamConfig(), + } +} + +// Store keeps track of the exec configurations. +type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, config *Config) { + e.Lock() + e.commands[id] = config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go new file mode 100644 index 00000000..a2c86b28 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_linux.go @@ -0,0 +1,26 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &libcontainerd.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + p.Capabilities = caps.GetAllCapabilities() + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go new file mode 100644 index 00000000..80d7dbb2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/export.go @@ -0,0 +1,55 @@ +package daemon + +import ( + "fmt" + "io" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found.
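ContainerExport below ultimately hands the archive back through ioutils.NewReadCloserWrapper, so that closing the export stream also unmounts the container. A stdlib-only sketch of that wrap-the-Close pattern; `readCloserWrapper` and the printed message are illustrative, not the vendored helper:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// readCloserWrapper ties cleanup to the reader's lifetime: whoever drains
// the stream triggers the cleanup simply by closing it.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (r *readCloserWrapper) Close() error { return r.closer() }

func main() {
	data := strings.NewReader("tar bytes...")
	rc := &readCloserWrapper{Reader: data, closer: func() error {
		fmt.Println("unmount container") // stand-in for daemon.Unmount
		return nil
	}}
	io.Copy(ioutil.Discard, rc)
	rc.Close()
}
```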
+func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + data, err := daemon.containerExport(container) + if err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + return nil +} + +func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go deleted file mode 100644 index 572fc9be..00000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go +++ /dev/null @@ -1,32 +0,0 @@ -package graphdriver - -import "sync" - -// RefCounter is a generic counter for use by graphdriver Get/Put calls -type RefCounter struct { - counts map[string]int - mu sync.Mutex -} - -// NewRefCounter returns a new RefCounter -func NewRefCounter() *RefCounter { - return &RefCounter{counts: make(map[string]int)} -} - -// Increment increaes the ref count for the given id and returns the current count -func (c *RefCounter) Increment(id string) int { - c.mu.Lock() - c.counts[id]++ - count := c.counts[id] - c.mu.Unlock() - return count -} - -// Decrement decreases the ref count for the given id and returns the current count -func (c *RefCounter) Decrement(id string) int { - c.mu.Lock() - c.counts[id]-- - count := c.counts[id] - c.mu.Unlock() - return count -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go index 495bac2c..ced960b4 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -46,12 +46,9 @@ type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDM type ProtoDriver interface { // String returns a string representation of this driver. String() string - // CreateReadWrite creates a new, empty filesystem layer that is ready - // to be used as the storage for a container. - CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error // Create creates a new, empty, filesystem layer with the // specified id and parent and mountLabel. Parent and mountLabel may be "". - Create(id, parent, mountLabel string, storageOpt map[string]string) error + Create(id, parent, mountLabel string) error // Remove attempts to remove the filesystem layer with this id. 
Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go deleted file mode 100644 index 6c09affa..00000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package graphdriver - -var ( - // Slice of drivers that should be used in order - priority = []string{ - "windowsfilter", - "windowsdiff", - "vfs", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - // Note it is OK to return FsMagicUnsupported on Windows. - return FsMagicUnsupported, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go new file mode 100644 index 00000000..7d81a83a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go @@ -0,0 +1,169 @@ +// +build linux + +package overlay + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +type copyFlags int + +const ( + copyHardlink copyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags copyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags&copyHardlink != 0 { + isHardlink = true + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata +// already shares an inode for hardlinks.
+ if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 00000000..f266408f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,470 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "github.com/docker/containerd/subreaper/exec" + "path" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. 
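The fallback in the wrapper just above is driven by a sentinel error: the optimized ApplyDiff path returns ErrApplyDiffFallback to punt to the naive differ, as the method that follows shows. In miniature, with all names illustrative:

```go
package main

import (
	"errors"
	"fmt"
)

var errFallback = errors.New("fall back to the naive path")

func fastApply() (int64, error)  { return 0, errFallback } // optimized path declines
func naiveApply() (int64, error) { return 42, nil }        // generic path always works

// apply tries the fast path and only falls back on the sentinel;
// any other error propagates unchanged.
func apply() (int64, error) {
	if n, err := fastApply(); err != errFallback {
		return n, err
	}
	return naiveApply()
}

func main() { fmt.Println(apply()) } // 42 <nil>
```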
+func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is an "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases: either the +// parent has a "root" dir, in which case we start out with an empty "upper" +// directory overlaid on the parent's root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally, we have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + pathCacheLock sync.Mutex + pathCache map[string]string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +var backingFs = "" + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for the overlay filesystem. +// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned. +// If an overlay filesystem is not supported over an existing filesystem, graphdriver.ErrIncompatibleFS is returned.
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs or zfs + switch fsMagic { + case graphdriver.FsMagicBtrfs: + logrus.Error("'overlay' is not supported over btrfs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicAufs: + logrus.Error("'overlay' is not supported over aufs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicZfs: + logrus.Error("'overlay' is not supported over zfs.") + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + d := &Driver{ + home: home, + pathCache: make(map[string]string), + uidMaps: uidMaps, + gidMaps: gidMaps, + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // /proc/filesystems to check whether overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two-dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + } +} + +// GetMetadata returns metadata about the overlay driver such as the RootDir, LowerDir, UpperDir, WorkDir and MergedDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup simply returns nil and does not change the existing filesystem. +// This is required to satisfy the graphdriver.Driver interface. +func (d *Driver) Cleanup() error { + return nil +} + +// Create is used to create the upper, work, and merged directories and the lower-id file required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay.
+func (d *Driver) Create(id, parent, mountLabel string) (retErr error) { + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Top-level images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + d.pathCacheLock.Lock() + delete(d.pathCache, id) + d.pathCacheLock.Unlock() + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path.
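Get, defined next, is where those directories come together: it reads lower-id, formats the lowerdir/upperdir/workdir options, and mounts overlay onto "merged". A condensed sketch of that step follows; the home path and `mountMerged` name are illustrative, error handling is kept minimal, and the mount itself requires CAP_SYS_ADMIN plus kernel overlay support:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path"
	"syscall"
)

// mountMerged shows the essence of Get for a non-image layer: the parent's
// "root" becomes lowerdir, this layer's "upper" and "work" complete the
// overlay, which is mounted at "merged".
func mountMerged(home, id string) (string, error) {
	dir := path.Join(home, id)
	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
	if err != nil {
		return "", err
	}
	merged := path.Join(dir, "merged")
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		path.Join(home, string(lowerID), "root"),
		path.Join(dir, "upper"),
		path.Join(dir, "work"))
	return merged, syscall.Mount("overlay", merged, "overlay", 0, opts)
}

func main() {
	fmt.Println(mountMerged("/var/lib/docker/overlay", "abc123"))
}
```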
+func (d *Driver) Get(id string, mountLabel string) (string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + d.pathCacheLock.Lock() + d.pathCache[id] = rootDir + d.pathCacheLock.Unlock() + return rootDir, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + lowerDir := path.Join(d.dir(string(lowerID)), "root") + upperDir := path.Join(dir, "upper") + workDir := path.Join(dir, "work") + mergedDir := path.Join(dir, "merged") + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + + // if it's mounted already, just return + mounted, err := d.mounted(mergedDir) + if err != nil { + return "", err + } + if mounted { + return mergedDir, nil + } + + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + + d.pathCacheLock.Lock() + d.pathCache[id] = mergedDir + d.pathCacheLock.Unlock() + + return mergedDir, nil +} + +func (d *Driver) mounted(dir string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir) +} + +// Put unmounts the mount path created for the given id. +func (d *Driver) Put(id string) error { + d.pathCacheLock.Lock() + mountpoint, exists := d.pathCache[id] + d.pathCacheLock.Unlock() + + if !exists { + logrus.Debugf("Put on a non-mounted device %s", id) + // but it might still be here + if d.Exists(id) { + mountpoint = path.Join(d.dir(id), "merged") + } + + d.pathCacheLock.Lock() + d.pathCache[id] = mountpoint + d.pathCacheLock.Unlock() + } + + if mounted, err := d.mounted(mountpoint); mounted || err != nil { + if err = syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return err + } + return nil +} + +// ApplyDiff applies the new layer on top of the root; if the parent does not exist it will return an ErrApplyDiffFallback error. +func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer.
This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 00000000..3dbb4de4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go index 3a8d599c..28657fef 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -54,23 +54,7 @@ func (d *graphDriverProxy) String() string { return d.name } -func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error { +func (d *graphDriverProxy) Create(id, parent, mountLabel string) error { args := &graphDriverRequest{ ID: id, Parent: parent, diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 00000000..3a952642 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 00000000..98fad23b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go new file mode 100644 index 00000000..00d9f8ec --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,135 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent, mountLabel string) error { + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. 
+func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_delete.go b/vendor/github.com/docker/docker/daemon/image_delete.go new file mode 100644 index 00000000..7c6329a6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_delete.go @@ -0,0 +1,371 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/errors" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +type conflictType int + +const ( + conflictDependentChild conflictType = (1 << iota) + conflictRunningContainer + conflictActiveReference + conflictStoppedContainer + conflictHard = conflictDependentChild | conflictRunningContainer + conflictSoft = conflictActiveReference | conflictStoppedContainer +) + +// ImageDelete deletes the image referenced by the given imageRef from this +// daemon. The given imageRef can be an image ID, ID prefix, or a repository +// reference (with an optional tag or digest, defaulting to the tag name +// "latest"). There is differing behavior depending on whether the given +// imageRef is a repository reference or not. +// +// If the given imageRef is a repository reference then that repository +// reference will be removed. However, if there exist any containers which +// were created using the same image reference then the repository reference +// cannot be removed unless either there are other repository references to the +// same image or force is true. Following removal of the repository reference, +// an attempt will be made to delete the referenced image itself as described +// below, but quietly, meaning any image delete conflicts will cause the image +// to not be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendant image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts and can be +// removed if there are soft conflicts only if force is true. +// +// If prune is true, an attempt will be made to quietly delete each ancestor +// image, meaning any delete conflicts will cause the image to not be deleted +// and the conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package.
This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. +func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { + records := []types.ImageDelete{} + + imgID, err := daemon.GetImageID(imageRef) + if err != nil { + return nil, daemon.imageNotExistToErrcode(err) + } + + repoRefs := daemon.referenceStore.References(imgID) + + var removedRepositoryRef bool + if !isImageIDPrefix(imgID.String(), imageRef) { + // A repository reference was given and should be removed + // first. We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !(force || len(repoRefs) > 1) { + if container := daemon.getContainerUsingImage(imgID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errors.NewRequestConflictError(err) + } + } + + parsedRef, err := reference.ParseNamed(imageRef) + if err != nil { + return nil, err + } + + parsedRef, err = daemon.removeImageRef(parsedRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + + repoRefs = daemon.referenceStore.References(imgID) + + // If this is a tag reference and all the remaining references + // to this image are digest references, delete the remaining + // references so that they don't prevent removal of the image. + if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + foundTagRef := false + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical { + foundTagRef = true + break + } + } + if !foundTagRef { + for _, repoRef := range repoRefs { + if _, err := daemon.removeImageRef(repoRef); err != nil { + return records, err + } + + untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} + records = append(records, untaggedRecord) + } + repoRefs = []reference.Named{} + } + } + + // If it has remaining references then the untag finished the remove + if len(repoRefs) > 0 { + return records, nil + } + + removedRepositoryRef = true + } else { + // If an ID reference was given AND there is exactly one + // repository reference to the image then we will want to + // remove that reference. + // FIXME: Is this the behavior we want? 
+ if len(repoRefs) == 1 { + c := conflictHard + if !force { + c |= conflictSoft &^ conflictActiveReference + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + return nil, conflict + } + + parsedRef, err := daemon.removeImageRef(repoRefs[0]) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + } + } + + return records, daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef) +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } + + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. Returns nil if there is no such container. +func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { + return daemon.containers.First(func(c *container.Container) bool { + return c.ImageID == imageID + }) +} + +// removeImageRef attempts to parse and remove the given image reference from +// this daemon's store of repository tag/digest references. The given +// repositoryRef must not be an image ID but a repository name followed by an +// optional tag or digest reference. If tag or digest is omitted, the default +// tag is used. Returns the resolved image reference and an error. +func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { + ref = reference.WithDefaultTag(ref) + // Ignore the boolean value returned, as far as we're concerned, this + // is an idempotent operation and it's okay if the reference didn't + // exist in the first place. + _, err := daemon.referenceStore.Delete(ref) + + return ref, err +} + +// removeAllReferencesToImageID attempts to remove every reference to the given +// imgID from this daemon's store of repository tag/digest references. Returns +// on the first encountered error. Removed references are logged to this +// daemon's event service. An "Untagged" types.ImageDelete is added to the +// given list of records. +func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { + imageRefs := daemon.referenceStore.References(imgID) + + for _, imageRef := range imageRefs { + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + *records = append(*records, untaggedRecord) + } + + return nil +} + +// ImageDeleteConflict holds a soft or hard conflict and an associated error. +// Implements the error interface. +type imageDeleteConflict struct { + hard bool + used bool + imgID image.ID + message string +} + +func (idc *imageDeleteConflict) Error() string { + var forceMsg string + if idc.hard { + forceMsg = "cannot be forced" + } else { + forceMsg = "must be forced" + } + + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) +} + +// imageDeleteHelper attempts to delete the given image from this daemon. 
If +// the image has any hard delete conflicts (child images or running containers +// using the image) then it cannot be deleted. If the image has any soft delete +// conflicts (any tags/digests referencing the image or any stopped container +// using the image) then it can only be deleted if force is true. If the delete +// succeeds and prune is true, the parent images are also deleted if they do +// not have any soft or hard delete conflicts themselves. Any deleted images +// and untagged references are appended to the given records. If any error or +// conflict is encountered, it will be returned immediately without deleting +// the image. If quiet is true, any encountered conflicts will be ignored and +// the function will return nil immediately without deleting the image. +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { + // First, determine if this image has any conflicts. Ignore soft conflicts + // if force is true. + c := conflictHard + if !force { + c |= conflictSoft + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + // Ignore conflicts UNLESS the image is "dangling" or not being used in + // which case we want the user to know. + return nil + } + + // There was a conflict and it's either a hard conflict OR we are not + // forcing deletion on soft conflicts. + return conflict + } + + parent, err := daemon.imageStore.GetParent(imgID) + if err != nil { + // There may be no parent + parent = "" + } + + // Delete all repository tag/digest references to this image. + if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + return err + } + + removedLayers, err := daemon.imageStore.Delete(imgID) + if err != nil { + return err + } + + daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") + *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) + for _, removedLayer := range removedLayers { + *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) + } + + if !prune || parent == "" { + return nil + } + + // We need to prune the parent image. This means delete it if there are + // no tags/digests referencing it and there are no containers using it ( + // either running or stopped). + // Do not force prunings, but do so quietly (stopping on any encountered + // conflicts). + return daemon.imageDeleteHelper(parent, records, false, true, true) +} + +// checkImageDeleteConflict determines whether there are any conflicts +// preventing deletion of the given image from this daemon. A hard conflict is +// any image which has the given image as a parent or any running container +// using the image. A soft conflict is any tags/digest referencing the given +// image or any stopped container using the image. If ignoreSoftConflicts is +// true, this function will not check for soft conflict conditions. +func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { + // Check if the image has any descendant images. + if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + return &imageDeleteConflict{ + hard: true, + imgID: imgID, + message: "image has dependent child images", + } + } + + if mask&conflictRunningContainer != 0 { + // Check if any running container is using the image. 
+ running := func(c *container.Container) bool { + return c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(running); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + hard: true, + used: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), + } + } + } + + // Check if any repository tags/digest reference this image. + if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 { + return &imageDeleteConflict{ + imgID: imgID, + message: "image is referenced in one or more repositories", + } + } + + if mask&conflictStoppedContainer != 0 { + // Check if any stopped containers reference this image. + stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { + return !(len(daemon.referenceStore.References(imgID)) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +} diff --git a/vendor/github.com/docker/docker/daemon/images.go b/vendor/github.com/docker/docker/daemon/images.go new file mode 100644 index 00000000..e4c3797f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images.go @@ -0,0 +1,162 @@ +package daemon + +import ( + "fmt" + "path" + "sort" + + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. +type byCreated []*types.Image + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + return daemon.imageStore.Map() +} + +// Images returns a filtered list of images. filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by api/types/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. 
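For context before the function itself: filterArgs is the JSON form produced by engine-api's filters package, which this tree already vendors. A short sketch of building and encoding one; the exact wire shape shown in the comment is approximate:

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

// Build the filter set a client would send as the filterArgs parameter
// of Images: dangling images only, restricted by a label.
func main() {
	args := filters.NewArgs()
	args.Add("dangling", "true")
	args.Add("label", "maintainer=someone")
	s, err := filters.ToParam(args)
	// e.g. {"dangling":{"true":true},"label":{"maintainer=someone":true}}
	fmt.Println(s, err)
}
```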
+func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { + var ( + allImages map[image.ID]*image.Image + err error + danglingOnly = false + ) + + imageFilters, err := filters.FromParam(filterArgs) + if err != nil { + return nil, err + } + if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { + return nil, err + } + + if imageFilters.Include("dangling") { + if imageFilters.ExactMatch("dangling", "true") { + danglingOnly = true + } else if !imageFilters.ExactMatch("dangling", "false") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) + } + } + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + + images := []*types.Image{} + + var filterTagged bool + if filter != "" { + filterRef, err := reference.ParseNamed(filter) + if err == nil { // parse error means wildcard repo + if _, ok := filterRef.(reference.NamedTagged); ok { + filterTagged = true + } + } + } + + for id, img := range allImages { + if imageFilters.Include("label") { + // Very old images that do not have image.Config (or even labels) + if img.Config == nil { + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", img.Config.Labels) { + continue + } + } + + layerID := img.RootFS.ChainID() + var size int64 + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + + size, err = l.Size() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + } + + newImage := newImage(img, size) + + for _, ref := range daemon.referenceStore.References(id) { + if filter != "" { // filter by tag/repo name + if filterTagged { // filter by tag, require full ref match + if ref.String() != filter { + continue + } + } else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact + continue + } + } + if _, ok := ref.(reference.Canonical); ok { + newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) + } + if _, ok := ref.(reference.NamedTagged); ok { + newImage.RepoTags = append(newImage.RepoTags, ref.String()) + } + } + if newImage.RepoDigests == nil && newImage.RepoTags == nil { + if all || len(daemon.imageStore.Children(id)) == 0 { + + if imageFilters.Include("dangling") && !danglingOnly { + // dangling=false case, so dangling images are not needed + continue + } + if filter != "" { // skip images with no references if filtering by tag + continue + } + newImage.RepoDigests = []string{"<none>@<none>"} + newImage.RepoTags = []string{"<none>:<none>"} + } else { + continue + } + } else if danglingOnly { + continue + } + + images = append(images, newImage) + } + + sort.Sort(sort.Reverse(byCreated(images))) + + return images, nil +} + +func newImage(image *image.Image, size int64) *types.Image { + newImage := new(types.Image) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = size + newImage.VirtualSize = size + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/vendor/github.com/docker/docker/daemon/import.go b/vendor/github.com/docker/docker/daemon/import.go new file mode 100644 index 00000000..4961a30f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/import.go @@ -0,0 +1,109 @@ +package daemon + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "runtime" + "time" + +
"github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types/container" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *container.Config) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + rc io.ReadCloser + resp *http.Response + ) + + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + u, err := url.Parse(src) + if err != nil { + return err + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + progressOutput := sf.NewProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + // TODO: support windows baselayer? + l, err := daemon.layerStore.Register(inflatedLayerData, "") + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImage(newRef, id.String()); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(sf.FormatStatus("", id.String())) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/info.go b/vendor/github.com/docker/docker/daemon/info.go new file mode 100644 index 00000000..c94cf9ba --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info.go @@ -0,0 +1,162 @@ +package daemon + +import ( + "os" + "runtime" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume/drivers" + "github.com/docker/engine-api/types" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo 
returns information about the host server the daemon is running on. +func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + } + + sysInfo := sysinfo.New(true) + + var cRunning, cPaused, cStopped int32 + daemon.containers.ApplyAll(func(c *container.Container) { + switch c.StateString() { + case "paused": + atomic.AddInt32(&cPaused, 1) + case "running": + atomic.AddInt32(&cRunning, 1) + default: + atomic.AddInt32(&cStopped, 1) + } + }) + + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), + Images: len(daemon.imageStore.Map()), + Driver: daemon.GraphDriverName(), + DriverStatus: daemon.layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: utils.IsDebugEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: runtime.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: utils.ExperimentalBuild(), + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + } + + // TODO Windows. Refactor this more once sysinfo is refactored into + // platform specific code. On Windows, sysinfo.cgroupMemInfo and + // sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if + // an attempt is made to access through them. 
+ if runtime.GOOS != "windows" { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + } + + if hostname, err := os.Hostname(); err == nil { + v.Name = hostname + } + + return v, nil +} + +// SystemVersion returns version information about the daemon. +func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: utils.ExperimentalBuild(), + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + + pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + + return pluginsInfo +} diff --git a/vendor/github.com/docker/docker/daemon/inspect.go b/vendor/github.com/docker/docker/daemon/inspect.go new file mode 100644 index 00000000..2f810e3e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect.go @@ -0,0 +1,245 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/versions/v1p20" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. 
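+//
+// A minimal illustrative sketch (hypothetical caller, not from the upstream
+// docs): the version argument is the negotiated API version and selects the
+// response shape, e.g.
+//
+//	raw, err := daemon.ContainerInspect("web", false, version.Version("1.19"))
+//	if err == nil {
+//		legacy := raw.(*v1p19.ContainerJSON) // pre-1.20 clients get the legacy shape
+//		_ = legacy
+//	}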
+func (daemon *Daemon) ContainerInspect(name string, size bool, version version.Version) (interface{}, error) { + switch { + case version.LessThan("1.20"): + return daemon.containerInspectPre120(name) + case version.Equal("1.20"): + return daemon.containerInspect120(name) + } + return daemon.containerInspectCurrent(name, size) +} + +func (daemon *Daemon) containerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, size) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + networkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: container.NetworkSettings.Bridge, + SandboxID: container.NetworkSettings.SandboxID, + HairpinMode: container.NetworkSettings.HairpinMode, + LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, + Ports: container.NetworkSettings.Ports, + SandboxKey: container.NetworkSettings.SandboxKey, + SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, + SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), + Networks: container.NetworkSettings.Networks, + } + + return &types.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: container.Config, + NetworkSettings: networkSettings, + }, nil +} + +// containerInspect120 serializes the master version of a container into a json type. +func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // we need this trick to preserve empty log driver, so + // container will use daemon defaults even if daemon changes them + if hostConfig.LogConfig.Type == "" { + hostConfig.LogConfig.Type = daemon.defaultLogConfig.Type + } + + if len(hostConfig.LogConfig.Config) == 0 { + hostConfig.LogConfig.Config = daemon.defaultLogConfig.Config + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + 
Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode, + Error: container.State.Error, + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + var ( + sizeRw int64 + sizeRootFs int64 + ) + if size { + sizeRw, sizeRootFs = daemon.getSize(container) + contJSONBase.SizeRw = &sizeRw + contJSONBase.SizeRootFs = &sizeRootFs + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + if err != nil { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. +func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { + e, err := daemon.getExecConfig(id) + if err != nil { + return nil, err + } + + pc := inspectExecProcessConfig(e) + + return &backend.ExecInspect{ + ID: e.ID, + Running: e.Running, + ExitCode: e.ExitCode, + ProcessConfig: pc, + OpenStdin: e.OpenStdin, + OpenStdout: e.OpenStdout, + OpenStderr: e.OpenStderr, + CanRemove: e.CanRemove, + ContainerID: e.ContainerID, + DetachKeys: e.DetachKeys, + }, nil +} + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + return volumeToAPIType(v), nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. 
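+//
+// For illustration (hypothetical values): given
+//
+//	networks := map[string]*networktypes.EndpointSettings{
+//		"bridge": {IPAddress: "172.17.0.2", IPPrefixLen: 16, Gateway: "172.17.0.1"},
+//	}
+//
+// the returned DefaultNetworkSettings mirrors the "bridge" endpoint's fields;
+// a map without a "bridge" entry yields the zero value.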
+func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*networktypes.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_unix.go b/vendor/github.com/docker/docker/daemon/inspect_unix.go new file mode 100644 index 00000000..6033c02d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_unix.go @@ -0,0 +1,91 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/versions/v1p19" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + contJSONBase.AppArmorProfile = container.AppArmorProfile + contJSONBase.ResolvConfPath = container.ResolvConfPath + contJSONBase.HostnamePath = container.HostnamePath + contJSONBase.HostsPath = container.HostsPath + + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. +func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + Memory: container.HostConfig.Memory, + MemorySwap: container.HostConfig.MemorySwap, + CPUShares: container.HostConfig.CPUShares, + CPUSet: container.HostConfig.CpusetCpus, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + Privileged: &e.Privileged, 
+ User: e.User, + } +} diff --git a/vendor/github.com/docker/docker/daemon/kill.go b/vendor/github.com/docker/docker/daemon/kill.go new file mode 100644 index 00000000..3967f0f2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/kill.go @@ -0,0 +1,153 @@ +package daemon + +import ( + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/signal" +) + +type errNoSuchProcess struct { + pid int + signal int +} + +func (e errNoSuchProcess) Error() string { + return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) +} + +// isErrNoSuchProcess returns true if the error +// is an instance of errNoSuchProcess. +func isErrNoSuchProcess(err error) bool { + _, ok := err.(errNoSuchProcess) + return ok +} + +// ContainerKill sends signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(name string, sig uint64) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + return daemon.Kill(container) + } + return daemon.killWithSignal(container, int(sig)) +} + +// killWithSignal sends the container the given signal. This wrapper for the +// host specific kill command prepares the container before attempting +// to send the signal. An error is returned if the container is paused +// or not running, or if there is a problem returned from the +// underlying kill command. +func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error { + logrus.Debugf("Sending %d to %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) + } + + if !container.Running { + return errNotRunning{container.ID} + } + + container.ExitOnNext() + + if !daemon.IsShuttingDown() { + container.HasBeenManuallyStopped = true + } + + // if the container is currently restarting we do not need to send the signal + // to the process. Telling the monitor that it should exit on it's next event + // loop is enough + if container.Restarting { + return nil + } + + if err := daemon.kill(container, sig); err != nil { + err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err) + // if container or process not exists, ignore the error + if strings.Contains(err.Error(), "container not found") || + strings.Contains(err.Error(), "no such process") { + logrus.Warnf("%s", err.Error()) + } else { + return err + } + } + + attributes := map[string]string{ + "signal": fmt.Sprintf("%d", sig), + } + daemon.LogContainerEventWithAttributes(container, "kill", attributes) + return nil +} + +// Kill forcefully terminates a container. +func (daemon *Daemon) Kill(container *container.Container) error { + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + // 1. 
Send SIGKILL
+	if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
+		// While normally we might "return err" here we're not going to
+		// because if we can't stop the container by this point then
+		// it's probably because it's already stopped. Meaning, between
+		// the time of the IsRunning() call above and now it stopped.
+		// Also, since the err return will be environment specific we can't
+		// look for any particular (common) error that would indicate
+		// that the process is already dead vs something else going wrong.
+		// So, instead we'll give it up to 2 more seconds to complete and if
+		// by that time the container is still running, then the error
+		// we got is probably valid and so we return it to the caller.
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+
+		if container.IsRunning() {
+			container.WaitStop(2 * time.Second)
+			if container.IsRunning() {
+				return err
+			}
+		}
+	}
+
+	// 2. Wait for the process to die; as a last resort, try to kill the process directly
+	if err := killProcessDirectly(container); err != nil {
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+		return err
+	}
+
+	container.WaitStop(-1 * time.Second)
+	return nil
+}
+
+// killPossiblyDeadProcess is a wrapper around killWithSignal(), suppressing the "no such process" error.
+func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
+	err := daemon.killWithSignal(container, sig)
+	if err == syscall.ESRCH {
+		e := errNoSuchProcess{container.GetPID(), sig}
+		logrus.Debug(e)
+		return e
+	}
+	return err
+}
diff --git a/vendor/github.com/docker/docker/daemon/links.go b/vendor/github.com/docker/docker/daemon/links.go
new file mode 100644
index 00000000..7f691d4f
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/links.go
@@ -0,0 +1,87 @@
+package daemon
+
+import (
+	"sync"
+
+	"github.com/docker/docker/container"
+)
+
+// linkIndex stores link relationships between containers, including their specified alias.
+// The alias is the name the parent uses to reference the child.
+type linkIndex struct {
+	// idx maps a parent->alias->child relationship
+	idx map[*container.Container]map[string]*container.Container
+	// childIdx maps child->parent->aliases
+	childIdx map[*container.Container]map[*container.Container]map[string]struct{}
+	mu       sync.Mutex
+}
+
+func newLinkIndex() *linkIndex {
+	return &linkIndex{
+		idx:      make(map[*container.Container]map[string]*container.Container),
+		childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}),
+	}
+}
+
+// link adds indexes for the passed-in parent/child/alias relationships
+func (l *linkIndex) link(parent, child *container.Container, alias string) {
+	l.mu.Lock()
+
+	if l.idx[parent] == nil {
+		l.idx[parent] = make(map[string]*container.Container)
+	}
+	l.idx[parent][alias] = child
+	if l.childIdx[child] == nil {
+		l.childIdx[child] = make(map[*container.Container]map[string]struct{})
+	}
+	if l.childIdx[child][parent] == nil {
+		l.childIdx[child][parent] = make(map[string]struct{})
+	}
+	l.childIdx[child][parent][alias] = struct{}{}
+
+	l.mu.Unlock()
+}
+
+// unlink removes the requested alias for the given parent/child
+func (l *linkIndex) unlink(alias string, child, parent *container.Container) {
+	l.mu.Lock()
+	delete(l.idx[parent], alias)
+	delete(l.childIdx[child], parent)
+	l.mu.Unlock()
+}
+
+// children maps all the alias->child pairs for the passed-in parent;
+// aliases here are the aliases the parent uses to refer to the child
+func (l *linkIndex) children(parent *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+	children := l.idx[parent]
+	l.mu.Unlock()
+	return children
+}
+
+// parents maps all the alias->parent pairs for the passed-in child;
+// aliases here are the aliases the parents use to refer to the child
+func (l *linkIndex) parents(child *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+
+	parents := make(map[string]*container.Container)
+	for parent, aliases := range l.childIdx[child] {
+		for alias := range aliases {
+			parents[alias] = parent
+		}
+	}
+
+	l.mu.Unlock()
+	return parents
+}
+
+// delete deletes all link relationships referencing this container
+func (l *linkIndex) delete(container *container.Container) {
+	l.mu.Lock()
+	for _, child := range l.idx[container] {
+		delete(l.childIdx[child], container)
+	}
+	delete(l.idx, container)
+	delete(l.childIdx, container)
+	l.mu.Unlock()
+}
diff --git a/vendor/github.com/docker/docker/daemon/links/links.go b/vendor/github.com/docker/docker/daemon/links/links.go
new file mode 100644
index 00000000..af15de04
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/links/links.go
@@ -0,0 +1,141 @@
+package links
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/docker/go-connections/nat"
+)
+
+// Link struct holds information about parent/child linked containers
+type Link struct {
+	// Parent container IP address
+	ParentIP string
+	// Child container IP address
+	ChildIP string
+	// Link name
+	Name string
+	// Child environment variables
+	ChildEnvironment []string
+	// Child exposed ports
+	Ports []nat.Port
+}
+
+// NewLink initializes a new Link struct with the provided options.
+func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link {
+	var (
+		i     int
+		ports = make([]nat.Port, len(exposedPorts))
+	)
+
+	for p := range exposedPorts {
+		ports[i] = p
+		i++
+	}
+
+	return &Link{
+		Name:             name,
+		ChildIP:          childIP,
+		ParentIP:         parentIP,
+		ChildEnvironment: env,
+		Ports:            ports,
+	}
+}
+
+// ToEnv creates a slice of strings containing child container information in
+// the form of environment variables which will be later exported on container
+// startup.
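+//
+// As an illustration (hypothetical link named "/parent/db" exposing 5432/tcp,
+// with ChildIP "172.17.0.2"), the generated variables include:
+//
+//	DB_NAME=/parent/db
+//	DB_PORT=tcp://172.17.0.2:5432
+//	DB_PORT_5432_TCP=tcp://172.17.0.2:5432
+//	DB_PORT_5432_TCP_ADDR=172.17.0.2
+//	DB_PORT_5432_TCP_PORT=5432
+//	DB_PORT_5432_TCP_PROTO=tcp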
+func (l *Link) ToEnv() []string { + env := []string{} + + _, n := path.Split(l.Name) + alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } else { + i++ + } + } + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's name into the environment + env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) + + if l.ChildEnvironment != nil { + for _, v := range l.ChildEnvironment { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { + continue + } + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) + if parts[0] == "HOME" || parts[0] == "PATH" { + continue + } + env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) + } + } + return env +} + +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + +// Default port rules +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port + i := len(l.Ports) + + if i == 0 { + return nil + } else if i > 1 { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + } + p = l.Ports[0] + return &p +} diff --git a/vendor/github.com/docker/docker/daemon/list.go b/vendor/github.com/docker/docker/daemon/list.go new file mode 100644 index 00000000..44ab3cbc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list.go @@ -0,0 +1,515 @@ +package daemon + +import ( + "errors" + 
"fmt" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/volume" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/go-connections/nat" +) + +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, +} + +var acceptedPsFilterTags = map[string]bool{ + "ancestor": true, + "before": true, + "exited": true, + "id": true, + "isolation": true, + "label": true, + "name": true, + "status": true, + "since": true, + "volume": true, +} + +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize by the api. +type containerReducer func(*container.Container, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator to stop without returning an error. +var errStopIteration = errors.New("container list iteration stopped") + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*container.Container { + return daemon.containers.List() +} + +// listContext is the daemon generated filtering to iterate over containers. +// This is created based on the user specification from types.ContainerListOptions. +type listContext struct { + // idx is the container iteration index for this context + idx int + // ancestorFilter tells whether it should check ancestors or not + ancestorFilter bool + // names is a list of container names to filter with + names map[string][]string + // images is a list of images to filter with + images map[image.ID]bool + // filters is a collection of arguments to filter with, specified by the user + filters filters.Args + // exitAllowed is a list of exit codes allowed to filter with + exitAllowed []int + + // FIXME Remove this for 1.12 as --since and --before are deprecated + // beforeContainer is a filter to ignore containers that appear before the one given + beforeContainer *container.Container + // sinceContainer is a filter to stop the filtering when the iterator arrive to the given container + sinceContainer *container.Container + + // beforeFilter is a filter to ignore containers that appear before the one given + // this is used for --filter=before= and --before=, the latter is deprecated. + beforeFilter *container.Container + // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container + // this is used for --filter=since= and --since=, the latter is deprecated. + sinceFilter *container.Container + // ContainerListOptions is the filters set by the user + *types.ContainerListOptions +} + +// Containers returns the list of containers to show given the user's filtering. +func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(config, daemon.transformContainer) +} + +// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. 
+func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + containers := []*types.Container{} + + ctx, err := daemon.foldFilter(config) + if err != nil { + return nil, err + } + + for _, container := range daemon.List() { + t, err := daemon.reducePsContainer(container, ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + return containers, nil +} + +// reducePsContainer is the basic representation for a container as expected by the ps command. +func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { + container.Lock() + defer container.Unlock() + + // filter containers to return + action := includeContainerInList(container, ctx) + switch action { + case excludeContainer: + return nil, nil + case stopIteration: + return nil, errStopIteration + } + + // transform internal container struct into api structs + return reducer(container, ctx) +} + +// foldFilter generates the container filter based on the user's filtering options. +func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { + psFilters := config.Filter + + if err := psFilters.Validate(acceptedPsFilterTags); err != nil { + return nil, err + } + + var filtExited []int + + err := psFilters.WalkValues("exited", func(value string) error { + code, err := strconv.Atoi(value) + if err != nil { + return err + } + filtExited = append(filtExited, code) + return nil + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("status", func(value string) error { + if !container.IsValidStateString(value) { + return fmt.Errorf("Unrecognised filter value for status: %s", value) + } + + config.All = true + return nil + }) + if err != nil { + return nil, err + } + + var beforeContFilter, sinceContFilter *container.Container + // FIXME remove this for 1.12 as --since and --before are deprecated + var beforeContainer, sinceContainer *container.Container + + err = psFilters.WalkValues("before", func(value string) error { + beforeContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("since", func(value string) error { + sinceContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + imagesFilter := map[image.ID]bool{} + var ancestorFilter bool + if psFilters.Include("ancestor") { + ancestorFilter = true + psFilters.WalkValues("ancestor", func(ancestor string) error { + id, err := daemon.GetImageID(ancestor) + if err != nil { + logrus.Warnf("Error while looking up for image %v", ancestor) + return nil + } + if imagesFilter[id] { + // Already seen this ancestor, skip it + return nil + } + // Then walk down the graph and put the imageIds in imagesFilter + populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) + return nil + }) + } + + // FIXME remove this for 1.12 as --since and --before are deprecated + if config.Before != "" { + beforeContainer, err = daemon.GetContainer(config.Before) + if err != nil { + return nil, err + } + } + + // FIXME remove this for 1.12 as --since and --before are deprecated + if config.Since != "" { + sinceContainer, err = daemon.GetContainer(config.Since) + if err != nil { + return nil, err + } + } + + return &listContext{ + filters: psFilters, + 
ancestorFilter: ancestorFilter,
+		images:               imagesFilter,
+		exitAllowed:          filtExited,
+		beforeContainer:      beforeContainer,
+		sinceContainer:       sinceContainer,
+		beforeFilter:         beforeContFilter,
+		sinceFilter:          sinceContFilter,
+		ContainerListOptions: config,
+		names:                daemon.nameIndex.GetAll(),
+	}, nil
+}
+
+// includeContainerInList decides whether a container should be included in the output or not based on the filter.
+// It also decides if the iteration should be stopped or not.
+func includeContainerInList(container *container.Container, ctx *listContext) iterationAction {
+	// Do not include container if it's in the list before the filter container.
+	// Set the filter container to nil to include the rest of the containers after this one.
+	if ctx.beforeFilter != nil {
+		if container.ID == ctx.beforeFilter.ID {
+			ctx.beforeFilter = nil
+		}
+		return excludeContainer
+	}
+
+	// Stop iteration when the container arrives at the filter container
+	if ctx.sinceFilter != nil {
+		if container.ID == ctx.sinceFilter.ID {
+			return stopIteration
+		}
+	}
+
+	// Do not include container if it's stopped and we're not filtering
+	// FIXME remove the ctx.beforeContainer and ctx.sinceContainer part of the condition for 1.12 as --since and --before are deprecated
+	if !container.Running && !ctx.All && ctx.Limit <= 0 && ctx.beforeContainer == nil && ctx.sinceContainer == nil {
+		return excludeContainer
+	}
+
+	// Do not include container if the name doesn't match
+	if !ctx.filters.Match("name", container.Name) {
+		return excludeContainer
+	}
+
+	// Do not include container if the id doesn't match
+	if !ctx.filters.Match("id", container.ID) {
+		return excludeContainer
+	}
+
+	// Do not include container if any of the labels don't match
+	if !ctx.filters.MatchKVList("label", container.Config.Labels) {
+		return excludeContainer
+	}
+
+	// Do not include container if isolation doesn't match
+	if excludeContainer == excludeByIsolation(container, ctx) {
+		return excludeContainer
+	}
+
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	if ctx.beforeContainer != nil {
+		if container.ID == ctx.beforeContainer.ID {
+			ctx.beforeContainer = nil
+		}
+		return excludeContainer
+	}
+
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	if ctx.sinceContainer != nil {
+		if container.ID == ctx.sinceContainer.ID {
+			return stopIteration
+		}
+	}
+
+	// Stop iteration when the index is over the limit
+	if ctx.Limit > 0 && ctx.idx == ctx.Limit {
+		return stopIteration
+	}
+
+	// Do not include container if its exit code is not in the filter
+	if len(ctx.exitAllowed) > 0 {
+		shouldSkip := true
+		for _, code := range ctx.exitAllowed {
+			if code == container.ExitCode && !container.Running {
+				shouldSkip = false
+				break
+			}
+		}
+		if shouldSkip {
+			return excludeContainer
+		}
+	}
+
+	// Do not include container if its status doesn't match the filter
+	if !ctx.filters.Match("status", container.State.StateString()) {
+		return excludeContainer
+	}
+
+	if ctx.filters.Include("volume") {
+		volumesByName := make(map[string]*volume.MountPoint)
+		for _, m := range container.MountPoints {
+			if m.Name != "" {
+				volumesByName[m.Name] = m
+			} else {
+				volumesByName[m.Source] = m
+			}
+		}
+
+		volumeExist := fmt.Errorf("volume mounted in container")
+		err := ctx.filters.WalkValues("volume", func(value string) error {
+			if _, exist := container.MountPoints[value]; exist {
+				return volumeExist
+			}
+			if _, exist := volumesByName[value]; exist {
+				return volumeExist
+			}
+			return nil
+		})
+		if err != volumeExist {
+			return
excludeContainer + } + } + + if ctx.ancestorFilter { + if len(ctx.images) == 0 { + return excludeContainer + } + if !ctx.images[container.ImageID] { + return excludeContainer + } + } + + return includeContainer +} + +// transformContainer generates the container type expected by the docker ps command. +func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { + newC := &types.Container{ + ID: container.ID, + Names: ctx.names[container.ID], + ImageID: container.ImageID.String(), + } + if newC.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + newC.Names = []string{} + } + + image := container.Config.Image // if possible keep the original ref + if image != container.ImageID.String() { + id, err := daemon.GetImageID(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id != container.ImageID { + image = container.ImageID.String() + } + } + newC.Image = image + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = container.Path + } + newC.Created = container.Created.Unix() + newC.State = container.State.StateString() + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + // copy networks to avoid races + networks := make(map[string]*networktypes.EndpointSettings) + for name, network := range container.NetworkSettings.Networks { + if network == nil { + continue + } + networks[name] = &networktypes.EndpointSettings{ + EndpointID: network.EndpointID, + Gateway: network.Gateway, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + IPv6Gateway: network.IPv6Gateway, + GlobalIPv6Address: network.GlobalIPv6Address, + GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, + MacAddress: network.MacAddress, + } + if network.IPAMConfig != nil { + networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: network.IPAMConfig.IPv4Address, + IPv6Address: network.IPAMConfig.IPv6Address, + } + } + } + newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + return nil, err + } + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + PublicPort: h, + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(container) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + newC.Labels = container.Config.Labels + newC.Mounts = addMountPoints(container) + + return newC, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. 
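+//
+// The filter argument is the JSON-encoded filter set passed through the API;
+// an illustrative value is `{"dangling":["true"]}`, which restricts the
+// result to volumes that no container currently references.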
+func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { + var ( + volumesOut []*types.Volume + danglingOnly = false + ) + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, nil, err + } + + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + if volFilters.Include("dangling") { + if volFilters.ExactMatch("dangling", "true") || volFilters.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !volFilters.ExactMatch("dangling", "false") && !volFilters.ExactMatch("dangling", "0") { + return nil, nil, fmt.Errorf("Invalid filter 'dangling=%s'", volFilters.Get("dangling")) + } + } + + volumes, warnings, err := daemon.volumes.List() + if err != nil { + return nil, nil, err + } + if volFilters.Include("dangling") { + volumes = daemon.volumes.FilterByUsed(volumes, !danglingOnly) + } + for _, v := range volumes { + volumesOut = append(volumesOut, volumeToAPIType(v)) + } + return volumesOut, warnings, nil +} + +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { + if !ancestorMap[imageID] { + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) + } + ancestorMap[imageID] = true + } +} diff --git a/vendor/github.com/docker/docker/daemon/list_unix.go b/vendor/github.com/docker/docker/daemon/list_unix.go new file mode 100644 index 00000000..8dccbe4e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd + +package daemon + +import "github.com/docker/docker/container" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go new file mode 100644 index 00000000..568770e0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go @@ -0,0 +1,8 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/docker/docker/daemon/logger/context.go b/vendor/github.com/docker/docker/daemon/logger/context.go new file mode 100644 index 00000000..eb54c311 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/context.go @@ -0,0 +1,112 @@ +package logger + +import ( + "fmt" + "os" + "strings" + "time" +) + +// Context provides enough information for a logging driver to do its function. +type Context struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string +} + +// ExtraAttributes returns the user-defined extra attributes (labels, +// environment variables) in key-value format. This can be used by log drivers +// that support metadata to add more context to a log. 
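+//
+// Illustration (hypothetical values): with Config["labels"] = "com.example.env"
+// and ContainerLabels = {"com.example.env": "prod"}, the result is
+// {"com.example.env": "prod"}; a non-nil keyMod (such as strings.ToUpper)
+// rewrites each key before it is returned.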
+func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string { + extra := make(map[string]string) + labels, ok := ctx.Config["labels"] + if ok && len(labels) > 0 { + for _, l := range strings.Split(labels, ",") { + if v, ok := ctx.ContainerLabels[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + env, ok := ctx.Config["env"] + if ok && len(env) > 0 { + envMapping := make(map[string]string) + for _, e := range ctx.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] + } + } + for _, l := range strings.Split(env, ",") { + if v, ok := envMapping[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + return extra +} + +// Hostname returns the hostname from the underlying OS. +func (ctx *Context) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: can not resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was +// started with. The Entrypoint is prepended to the container +// arguments. +func (ctx *Context) Command() string { + terms := []string{ctx.ContainerEntrypoint} + for _, arg := range ctx.ContainerArgs { + terms = append(terms, arg) + } + command := strings.Join(terms, " ") + return command +} + +// ID Returns the Container ID shortened to 12 characters. +func (ctx *Context) ID() string { + return ctx.ContainerID[:12] +} + +// FullID is an alias of ContainerID. +func (ctx *Context) FullID() string { + return ctx.ContainerID +} + +// Name returns the ContainerName without a preceding '/'. +func (ctx *Context) Name() string { + return ctx.ContainerName[1:] +} + +// ImageID returns the ContainerImageID shortened to 12 characters. +func (ctx *Context) ImageID() string { + return ctx.ContainerImageID[:12] +} + +// ImageFullID is an alias of ContainerImageID. +func (ctx *Context) ImageFullID() string { + return ctx.ContainerImageID +} + +// ImageName is an alias of ContainerImageName +func (ctx *Context) ImageName() string { + return ctx.ContainerImageName +} diff --git a/vendor/github.com/docker/docker/daemon/logger/copier.go b/vendor/github.com/docker/docker/daemon/logger/copier.go new file mode 100644 index 00000000..436c0a8f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/copier.go @@ -0,0 +1,86 @@ +package logger + +import ( + "bufio" + "bytes" + "io" + "sync" + "time" + + "github.com/Sirupsen/logrus" +) + +// Copier can copy logs from specified sources to Logger and attach +// ContainerID and Timestamp. 
+// Writes are concurrent, so you need to implement synchronization in your logger.
+type Copier struct {
+	// cid is the container id for which we are copying logs
+	cid string
+	// srcs is a map of name -> reader pairs, for example "stdout", "stderr"
+	srcs     map[string]io.Reader
+	dst      Logger
+	copyJobs sync.WaitGroup
+	closed   chan struct{}
+}
+
+// NewCopier creates a new Copier
+func NewCopier(cid string, srcs map[string]io.Reader, dst Logger) *Copier {
+	return &Copier{
+		cid:    cid,
+		srcs:   srcs,
+		dst:    dst,
+		closed: make(chan struct{}),
+	}
+}
+
+// Run starts copying logs
+func (c *Copier) Run() {
+	for src, w := range c.srcs {
+		c.copyJobs.Add(1)
+		go c.copySrc(src, w)
+	}
+}
+
+func (c *Copier) copySrc(name string, src io.Reader) {
+	defer c.copyJobs.Done()
+	reader := bufio.NewReader(src)
+
+	for {
+		select {
+		case <-c.closed:
+			return
+		default:
+			line, err := reader.ReadBytes('\n')
+			line = bytes.TrimSuffix(line, []byte{'\n'})
+
+			// ReadBytes can return full or partial output even when it failed.
+			// e.g. it can return a full entry and EOF.
+			if err == nil || len(line) > 0 {
+				if logErr := c.dst.Log(&Message{ContainerID: c.cid, Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil {
+					logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr)
+				}
+			}
+
+			if err != nil {
+				if err != io.EOF {
+					logrus.Errorf("Error scanning log stream: %s", err)
+				}
+				return
+			}
+		}
+	}
+}
+
+// Wait waits until all copying is done
+func (c *Copier) Wait() {
+	c.copyJobs.Wait()
+}
+
+// Close closes the copier
+func (c *Copier) Close() {
+	select {
+	case <-c.closed:
+	default:
+		close(c.closed)
+	}
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go
new file mode 100644
index 00000000..5ec0f673
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/factory.go
@@ -0,0 +1,89 @@
+package logger
+
+import (
+	"fmt"
+	"sync"
+)
+
+// Creator builds a logging driver instance with given context.
+type Creator func(Context) (Logger, error)
+
+// LogOptValidator checks the options specific to the underlying
+// logging implementation.
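+//
+// A sketch of a custom validator (hypothetical driver that only accepts a
+// "tag" option):
+//
+//	func validateMyDriverOpts(cfg map[string]string) error {
+//		for key := range cfg {
+//			if key != "tag" {
+//				return fmt.Errorf("unknown log opt '%s' for mydriver log driver", key)
+//			}
+//		}
+//		return nil
+//	}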
+type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.registry[name]; ok { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + lf.registry[name] = c + return nil +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if !ok { + return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + return c, nil +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c, _ := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. +func ValidateLogOpts(name string, cfg map[string]string) error { + l := factory.getLogOptValidator(name) + if l != nil { + return l(cfg) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 00000000..9faa4e02 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,147 @@ +// Package jsonfilelog provides the default Logger implementation for +// Docker logging. This logger logs to files on the host server in the +// JSON format. +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/go-units" +) + +// Name is the name of the file that the jsonlogger logs to. +const Name = "json-file" + +// JSONFileLogger is Logger implementation for default Docker logging. 
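+//
+// Each message is stored as one JSON object per line; an illustrative entry
+// (hypothetical timestamp) looks like:
+//
+//	{"log":"hello world\n","stream":"stdout","time":"2016-05-01T12:00:00.000000000Z"}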
+type JSONFileLogger struct { + buf *bytes.Buffer + writer *loggerutils.RotateFileWriter + mu sync.Mutex + readers map[*logger.LogWatcher]struct{} // stores the active log followers + extra []byte // json-encoded extra attributes +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename passed in +// on given context. +func New(ctx logger.Context) (logger.Logger, error) { + var capval int64 = -1 + if capacity, ok := ctx.Config["max-size"]; ok { + var err error + capval, err = units.FromHumanSize(capacity) + if err != nil { + return nil, err + } + } + var maxFiles = 1 + if maxFileString, ok := ctx.Config["max-file"]; ok { + var err error + maxFiles, err = strconv.Atoi(maxFileString) + if err != nil { + return nil, err + } + if maxFiles < 1 { + return nil, fmt.Errorf("max-file cannot be less than 1") + } + } + + writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) + if err != nil { + return nil, err + } + + var extra []byte + if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { + var err error + extra, err = json.Marshal(attrs) + if err != nil { + return nil, err + } + } + + return &JSONFileLogger{ + buf: bytes.NewBuffer(nil), + writer: writer, + readers: make(map[*logger.LogWatcher]struct{}), + extra: extra, + }, nil +} + +// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. +func (l *JSONFileLogger) Log(msg *logger.Message) error { + timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) + if err != nil { + return err + } + l.mu.Lock() + err = (&jsonlog.JSONLogs{ + Log: append(msg.Line, '\n'), + Stream: msg.Source, + Created: timestamp, + RawAttrs: l.extra, + }).MarshalJSONBuf(l.buf) + if err != nil { + l.mu.Unlock() + return err + } + + l.buf.WriteByte('\n') + _, err = l.writer.Write(l.buf.Bytes()) + l.buf.Reset() + l.mu.Unlock() + + return err +} + +// ValidateLogOpt looks for json specific log options max-file & max-size. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + case "labels": + case "env": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +// LogPath returns the location the given json logger logs to. +func (l *JSONFileLogger) LogPath() string { + return l.writer.LogPath() +} + +// Close closes underlying file and signals all readers to stop. +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + err := l.writer.Close() + for r := range l.readers { + r.Close() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger. 
+func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go new file mode 100644 index 00000000..0c8fb5e5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,235 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + return + } + + if config.Tail != 0 { + tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) 
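+		// The rotated files were collected oldest-first above, so the combined
+		// reader lets tailFile count the last N lines across rotation boundaries.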
+ tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + // close all the rotated files + for _, f := range files { + if err := f.(io.Closer).Close(); err != nil { + logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) + } + } + + if !config.Follow { + return + } + + if config.Tail >= 0 { + latestFile.Seek(0, os.SEEK_END) + } + + l.mu.Lock() + l.readers[logWatcher] = struct{}{} + l.mu.Unlock() + + notifyRotate := l.writer.NotifyRotate() + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() + + l.writer.NotifyRotateEvict(notifyRotate) +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + + fileWatcher, err := filenotify.New() + if err != nil { + logWatcher.Err <- err + } + defer func() { + f.Close() + fileWatcher.Close() + }() + name := f.Name() + + if err := fileWatcher.Add(name); err != nil { + logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + logrus.Debugf("error watching log file for modifications: %v", err) + logWatcher.Err <- err + return + } + } + + var retries int + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + continue + } + + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. 
+				if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
+					reader := io.MultiReader(dec.Buffered(), f)
+					dec = json.NewDecoder(reader)
+					retries++
+					continue
+				}
+
+				return
+			}
+
+			select {
+			case <-fileWatcher.Events():
+				dec = json.NewDecoder(f)
+				continue
+			case <-fileWatcher.Errors():
+				logWatcher.Err <- err
+				return
+			case <-logWatcher.WatchClose():
+				fileWatcher.Remove(name)
+				return
+			case <-notifyRotate:
+				f.Close()
+				fileWatcher.Remove(name)
+
+				// retry when the file doesn't exist
+				for retries := 0; retries <= 5; retries++ {
+					f, err = os.Open(name)
+					if err == nil || !os.IsNotExist(err) {
+						break
+					}
+				}
+
+				if err != nil {
+					logWatcher.Err <- err
+					return
+				}
+				if err := fileWatcher.Add(name); err != nil {
+					logWatcher.Err <- err
+					return
+				}
+
+				dec = json.NewDecoder(f)
+				continue
+			}
+		}
+
+		retries = 0 // reset retries since we've succeeded
+		if !since.IsZero() && msg.Timestamp.Before(since) {
+			continue
+		}
+		select {
+		case logWatcher.Msg <- msg:
+		case <-logWatcher.WatchClose():
+			logWatcher.Msg <- msg
+			for {
+				msg, err := decodeLogLine(dec, l)
+				if err != nil {
+					return
+				}
+				if !since.IsZero() && msg.Timestamp.Before(since) {
+					continue
+				}
+				logWatcher.Msg <- msg
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go
new file mode 100644
index 00000000..cf8d571f
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/logger.go
@@ -0,0 +1,87 @@
+// Package logger defines interfaces that logger drivers implement to
+// log messages.
+//
+// The other half of a logger driver is the implementation of the
+// factory, which holds the contextual instance information that
+// allows multiple loggers of the same type to perform different
+// actions, such as logging to different locations.
+package logger
+
+import (
+	"errors"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonlog"
+)
+
+// ErrReadLogsNotSupported is returned when the logger does not support reading logs.
+var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading")
+
+const (
+	// TimeFormat is the time format used for timestamps sent to log readers.
+	TimeFormat           = jsonlog.RFC3339NanoFixed
+	logWatcherBufferSize = 4096
+)
+
+// Message is the data structure that represents a log record from a container.
+type Message struct {
+	ContainerID string
+	Line        []byte
+	Source      string
+	Timestamp   time.Time
+}
+
+// Logger is the interface for docker logging drivers.
+type Logger interface {
+	Log(*Message) error
+	Name() string
+	Close() error
+}
+
+// ReadConfig is the configuration passed into ReadLogs.
+type ReadConfig struct {
+	Since  time.Time
+	Tail   int
+	Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading.
+type LogReader interface {
+	// Read logs from underlying logging backend
+	ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface.
+type LogWatcher struct {
+	// For sending log messages to a reader.
+	Msg chan *Message
+	// For sending error messages that occur while reading logs.
+	Err           chan error
+	closeNotifier chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
+func NewLogWatcher() *LogWatcher {
+	return &LogWatcher{
+		Msg:           make(chan *Message, logWatcherBufferSize),
+		Err:           make(chan error, 1),
+		closeNotifier: make(chan struct{}),
+	}
+}
+
+// Close notifies the underlying log reader to stop.
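+// Close is safe to call more than once: the select below closes the
+// notifier channel only if it has not been closed already.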
+func (w *LogWatcher) Close() { + // only close if not already closed + select { + case <-w.closeNotifier: + default: + close(w.closeNotifier) + } +} + +// WatchClose returns a channel receiver that receives notification +// when the watcher has been closed. This should only be called from +// one goroutine. +func (w *LogWatcher) WatchClose() <-chan struct{} { + return w.closeNotifier +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go new file mode 100644 index 00000000..6653b9c4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go @@ -0,0 +1,46 @@ +package loggerutils + +import ( + "bytes" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/utils/templates" +) + +// ParseLogTag generates a context aware tag for consistency across different +// log drivers based on the context of the running container. +func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { + tagTemplate := lookupTagTemplate(ctx, defaultTemplate) + + tmpl, err := templates.NewParse("log-tag", tagTemplate) + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, &ctx); err != nil { + return "", err + } + + return buf.String(), nil +} + +func lookupTagTemplate(ctx logger.Context, defaultTemplate string) string { + tagTemplate := ctx.Config["tag"] + + deprecatedConfigs := []string{"syslog-tag", "gelf-tag", "fluentd-tag"} + for i := 0; tagTemplate == "" && i < len(deprecatedConfigs); i++ { + cfg := deprecatedConfigs[i] + if ctx.Config[cfg] != "" { + tagTemplate = ctx.Config[cfg] + logrus.Warn(fmt.Sprintf("Using log tag from deprecated log-opt '%s'. Please use: --log-opt tag=\"%s\"", cfg, tagTemplate)) + } + } + + if tagTemplate == "" { + tagTemplate = defaultTemplate + } + + return tagTemplate +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go new file mode 100644 index 00000000..99e0964a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go @@ -0,0 +1,124 @@ +package loggerutils + +import ( + "os" + "strconv" + "sync" + + "github.com/docker/docker/pkg/pubsub" +) + +// RotateFileWriter is Logger implementation for default Docker logging. 
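+// On rotation the live file becomes <base>.1, <base>.1 becomes <base>.2, and
+// so on, keeping at most maxFiles-1 rotated files; subscribers registered via
+// NotifyRotate are notified after each rotation.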
+type RotateFileWriter struct {
+	f            *os.File // store for closing
+	mu           sync.Mutex
+	capacity     int64 // maximum size of each file
+	currentSize  int64 // current size of the latest file
+	maxFiles     int   // maximum number of files
+	notifyRotate *pubsub.Publisher
+}
+
+// NewRotateFileWriter creates a new RotateFileWriter.
+func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) {
+	log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := log.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, err
+	}
+
+	return &RotateFileWriter{
+		f:            log,
+		capacity:     capacity,
+		currentSize:  size,
+		maxFiles:     maxFiles,
+		notifyRotate: pubsub.NewPublisher(0, 1),
+	}, nil
+}
+
+// Write writes the log message to the current file, rotating first if needed.
+func (w *RotateFileWriter) Write(message []byte) (int, error) {
+	w.mu.Lock()
+	if err := w.checkCapacityAndRotate(); err != nil {
+		w.mu.Unlock()
+		return -1, err
+	}
+
+	n, err := w.f.Write(message)
+	if err == nil {
+		w.currentSize += int64(n)
+	}
+	w.mu.Unlock()
+	return n, err
+}
+
+func (w *RotateFileWriter) checkCapacityAndRotate() error {
+	if w.capacity == -1 {
+		return nil
+	}
+
+	if w.currentSize >= w.capacity {
+		name := w.f.Name()
+		if err := w.f.Close(); err != nil {
+			return err
+		}
+		if err := rotate(name, w.maxFiles); err != nil {
+			return err
+		}
+		file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
+		if err != nil {
+			return err
+		}
+		w.f = file
+		w.currentSize = 0
+		w.notifyRotate.Publish(struct{}{})
+	}
+
+	return nil
+}
+
+func rotate(name string, maxFiles int) error {
+	if maxFiles < 2 {
+		return nil
+	}
+	for i := maxFiles - 1; i > 1; i-- {
+		toPath := name + "." + strconv.Itoa(i)
+		fromPath := name + "." + strconv.Itoa(i-1)
+		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// LogPath returns the location the given writer logs to.
+func (w *RotateFileWriter) LogPath() string {
+	return w.f.Name()
+}
+
+// MaxFiles returns the maximum number of files.
+func (w *RotateFileWriter) MaxFiles() int {
+	return w.maxFiles
+}
+
+// NotifyRotate returns a new subscriber channel that is notified on rotation.
+func (w *RotateFileWriter) NotifyRotate() chan interface{} {
+	return w.notifyRotate.Subscribe()
+}
+
+// NotifyRotateEvict removes the specified subscriber from receiving any more messages.
+func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) {
+	w.notifyRotate.Evict(sub)
+}
+
+// Close closes the underlying log file.
+func (w *RotateFileWriter) Close() error {
+	return w.f.Close()
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go
new file mode 100644
index 00000000..99e03278
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go
@@ -0,0 +1,247 @@
+// +build linux
+
+// Package syslog provides the logdriver for forwarding server logs to syslog endpoints.
+package syslog + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "os" + "path" + "strconv" + "strings" + "time" + + syslog "github.com/RackSec/srslog" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + name = "syslog" + secureProto = "tcp+tls" +) + +var facilities = map[string]syslog.Priority{ + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "syslog": syslog.LOG_SYSLOG, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "authpriv": syslog.LOG_AUTHPRIV, + "ftp": syslog.LOG_FTP, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +type syslogger struct { + writer *syslog.Writer +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// rsyslog uses appname part of syslog message to fill in an %syslogtag% template +// attribute in rsyslog.conf. In order to be backward compatible to rfc3164 +// tag will be also used as an appname +func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// New creates a syslog logger using the configuration passed in on +// the context. Supported context configuration variables are +// syslog-address, syslog-facility, & syslog-tag. 
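+//
+// An illustrative invocation (the address value here is hypothetical):
+//
+//	docker run --log-driver=syslog \
+//	    --log-opt syslog-address=tcp://10.0.0.42:514 \
+//	    --log-opt syslog-facility=daemon \
+//	    --log-opt tag="{{.ID}}"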
+func New(ctx logger.Context) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}") + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(ctx.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(ctx.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"]) + if err != nil { + return nil, err + } + + logTag := path.Base(os.Args[0]) + "/" + tag + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(ctx.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, logTag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, logTag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return s.writer.Err(string(msg.Line)) + } + return s.writer.Info(string(msg.Line)) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix socket validation + if url.Scheme == "unix" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility, & syslog-tag. 
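+// The syslog-tls-* options, the generic tag option, and syslog-format are
+// accepted as well; any other key is rejected.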
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "syslog-address": + case "syslog-facility": + case "syslog-tag": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"]); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go new file mode 100644 index 00000000..50cc51b6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package syslog diff --git a/vendor/github.com/docker/docker/daemon/logs.go b/vendor/github.com/docker/docker/daemon/logs.go new file mode 100644 index 00000000..40c47a6e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logs.go @@ -0,0 +1,154 @@ +package daemon + +import ( + "fmt" + "io" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" + containertypes "github.com/docker/engine-api/types/container" + timetypes "github.com/docker/engine-api/types/time" +) + +// ContainerLogs hooks up a container's stdout and stderr streams +// configured with the given struct. 
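+// Tail, Since and Follow map directly onto logger.ReadConfig; a Tail value
+// that does not parse as an integer is treated as -1, i.e. "all lines".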
+func (daemon *Daemon) ContainerLogs(containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + if !(config.ShowStdout || config.ShowStderr) { + return fmt.Errorf("You must choose at least one stream") + } + + cLog, err := daemon.getLogger(container) + if err != nil { + return err + } + logReader, ok := cLog.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + + follow := config.Follow && container.IsRunning() + tailLines, err := strconv.Atoi(config.Tail) + if err != nil { + tailLines = -1 + } + + logrus.Debug("logs: begin stream") + + var since time.Time + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return err + } + since = time.Unix(s, n) + } + readConfig := logger.ReadConfig{ + Since: since, + Tail: tailLines, + Follow: follow, + } + logs := logReader.ReadLogs(readConfig) + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + var outStream io.Writer = wf + errStream := outStream + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + select { + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + return nil + case <-config.Stop: + logs.Close() + return nil + case msg, ok := <-logs.Msg: + if !ok { + logrus.Debugf("logs: end stream") + logs.Close() + return nil + } + logLine := msg.Line + if config.Timestamps { + logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) + } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } + } +} + +func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { + if container.LogDriver != nil && container.IsRunning() { + return container.LogDriver, nil + } + cfg := daemon.getLogConfig(container.HostConfig.LogConfig) + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return nil, err + } + return container.StartLogger(cfg) +} + +// StartLogging initializes and starts the container logging stream. +func (daemon *Daemon) StartLogging(container *container.Container) error { + cfg := daemon.getLogConfig(container.HostConfig.LogConfig) + if cfg.Type == "none" { + return nil // do not start logging routines + } + + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return err + } + l, err := container.StartLogger(cfg) + if err != nil { + return fmt.Errorf("Failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// getLogConfig returns the log configuration for the container. 
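+// A per-container log config with an empty Type falls back to the json-file
+// driver; a container with no log configuration at all inherits the
+// daemon-wide default.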
+func (daemon *Daemon) getLogConfig(cfg containertypes.LogConfig) containertypes.LogConfig { + if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured + if cfg.Type == "" { + cfg.Type = jsonfilelog.Name + } + return cfg + } + + // Use daemon's default log config for containers + return daemon.defaultLogConfig +} diff --git a/vendor/github.com/docker/docker/daemon/monitor.go b/vendor/github.com/docker/docker/daemon/monitor.go new file mode 100644 index 00000000..f9f7def9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor.go @@ -0,0 +1,144 @@ +package daemon + +import ( + "errors" + "fmt" + "io" + "runtime" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/runconfig" +) + +// StateChanged updates daemon state changes from containerd +func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { + c := daemon.containers.Get(id) + if c == nil { + return fmt.Errorf("no such container: %s", id) + } + + switch e.State { + case libcontainerd.StateOOM: + // StateOOM is Linux specific and should never be hit on Windows + if runtime.GOOS == "windows" { + return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + } + daemon.LogContainerEvent(c, "oom") + case libcontainerd.StateExit: + c.Lock() + defer c.Unlock() + c.Wait() + c.Reset(false) + c.SetStopped(platformConstructExitStatus(e)) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + // FIXME: here is race condition between two RUN instructions in Dockerfile + // because they share same runconfig and change image. Must be fixed + // in builder/builder.go + return c.ToDisk() + case libcontainerd.StateRestart: + c.Lock() + defer c.Unlock() + c.Reset(false) + c.RestartCount++ + c.SetRestarting(platformConstructExitStatus(e)) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + return c.ToDisk() + case libcontainerd.StateExitProcess: + c.Lock() + defer c.Unlock() + if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { + ec := int(e.ExitCode) + execConfig.ExitCode = &ec + execConfig.Running = false + execConfig.Wait() + if err := execConfig.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", c.ID, err) + } + + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. + c.ExecCommands.Delete(execConfig.ID) + } else { + logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) + } + case libcontainerd.StateStart, libcontainerd.StateRestore: + c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) + c.HasBeenManuallyStopped = false + if err := c.ToDisk(); err != nil { + c.Reset(false) + return err + } + daemon.LogContainerEvent(c, "start") + case libcontainerd.StatePause: + c.Paused = true + daemon.LogContainerEvent(c, "pause") + case libcontainerd.StateResume: + c.Paused = false + daemon.LogContainerEvent(c, "unpause") + } + + return nil +} + +// AttachStreams is called by libcontainerd to connect the stdio. 
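+// The id may name either a container or an exec instance; for containers the
+// logging driver is started before the stdio streams are wired up.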
+func (daemon *Daemon) AttachStreams(id string, iop libcontainerd.IOPipe) error {
+	var s *runconfig.StreamConfig
+	c := daemon.containers.Get(id)
+	if c == nil {
+		ec, err := daemon.getExecConfig(id)
+		if err != nil {
+			return fmt.Errorf("no such exec/container: %s", id)
+		}
+		s = ec.StreamConfig
+	} else {
+		s = c.StreamConfig
+		if err := daemon.StartLogging(c); err != nil {
+			c.Reset(false)
+			return err
+		}
+	}
+
+	if stdin := s.Stdin(); stdin != nil {
+		if iop.Stdin != nil {
+			go func() {
+				io.Copy(iop.Stdin, stdin)
+				iop.Stdin.Close()
+			}()
+		}
+	} else {
+		if c != nil && !c.Config.Tty {
+			// no TTY is attached, so close containerd's stdin pipe to signal EOF
+			if iop.Stdin != nil {
+				iop.Stdin.Close()
+			}
+		}
+	}
+
+	copy := func(w io.Writer, r io.Reader) {
+		s.Add(1)
+		go func() {
+			if _, err := io.Copy(w, r); err != nil {
+				logrus.Errorf("%v stream copy error: %v", id, err)
+			}
+			s.Done()
+		}()
+	}
+
+	if iop.Stdout != nil {
+		copy(s.Stdout(), iop.Stdout)
+	}
+	if iop.Stderr != nil {
+		copy(s.Stderr(), iop.Stderr)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/monitor_linux.go b/vendor/github.com/docker/docker/daemon/monitor_linux.go
new file mode 100644
index 00000000..df8b6c5d
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/monitor_linux.go
@@ -0,0 +1,14 @@
+package daemon
+
+import (
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
+)
+
+// platformConstructExitStatus returns a platform specific exit status structure
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
+	return &container.ExitStatus{
+		ExitCode:  int(e.ExitCode),
+		OOMKilled: e.OOMKilled,
+	}
+}
diff --git a/vendor/github.com/docker/docker/daemon/mounts.go b/vendor/github.com/docker/docker/daemon/mounts.go
new file mode 100644
index 00000000..d4f24b28
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/mounts.go
@@ -0,0 +1,48 @@
+package daemon
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/container"
+	volumestore "github.com/docker/docker/volume/store"
+)
+
+func (daemon *Daemon) prepareMountPoints(container *container.Container) error {
+	for _, config := range container.MountPoints {
+		if err := daemon.lazyInitializeVolume(container.ID, config); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error {
+	var rmErrors []string
+	for _, m := range container.MountPoints {
+		if m.Volume == nil {
+			continue
+		}
+		daemon.volumes.Dereference(m.Volume, container.ID)
+		if rm {
+			// Do not remove named mountpoints
+			// these are mountpoints specified like `docker run -v <name>:/foo`
+			if m.Named {
+				continue
+			}
+			err := daemon.volumes.Remove(m.Volume)
+			// Ignore volume-in-use errors: a volume that is still
+			// referenced by another container is not an error, but
+			// an implementation detail. This prevents docker from
+			// logging "ERROR: Volume in use" when another container
+			// is still using the volume.
+ if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + } + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go new file mode 100644 index 00000000..823bec26 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -0,0 +1,22 @@ +package network + +import ( + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*networktypes.EndpointSettings + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool +} diff --git a/vendor/github.com/docker/docker/daemon/network_operations.go b/vendor/github.com/docker/docker/daemon/network_operations.go new file mode 100644 index 00000000..70097298 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network_operations.go @@ -0,0 +1,77 @@ +package daemon + +import ( + "fmt" + "os" + + "github.com/docker/docker/container" + derr "github.com/docker/docker/errors" + networktypes "github.com/docker/engine-api/types/network" +) + +func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error { + return nil +} + +func (daemon *Daemon) updateNetwork(container *container.Container) error { + return nil +} + +func (daemon *Daemon) allocateNetwork(container *container.Container) error { + return nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + 
return container.BuildHostnameFile() +} diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go new file mode 100644 index 00000000..47d4b89f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -0,0 +1,686 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/opencontainers/specs/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioReadBpsDevices(r) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioWriteBpsDevices(r) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioReadIOpsDevices(r) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioWriteIOpsDevices(r) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes := getCPUResources(r) + blkioWeight := r.BlkioWeight + + specResources := &specs.Resources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.BlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.Pids{ + Limit: &r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + s.Linux.Resources = specResources + return nil +} + +func setDevices(s *specs.Spec, c *container.Container) error { + // Build lists of devices allowed and created within the container. + var devs []specs.Device + devPermissions := s.Linux.Resources.Devices + if c.HostConfig.Privileged { + hostDevices, err := devices.HostDevices() + if err != nil { + return err + } + for _, d := range hostDevices { + devs = append(devs, specDevice(d)) + } + rwm := "rwm" + devPermissions = []specs.DeviceCgroup{ + { + Allow: true, + Access: &rwm, + }, + } + } else { + for _, deviceMapping := range c.HostConfig.Devices { + d, dPermissions, err := getDevicesFromPath(deviceMapping) + if err != nil { + return err + } + devs = append(devs, d...) + devPermissions = append(devPermissions, dPermissions...) + } + } + + s.Linux.Devices = append(s.Linux.Devices, devs...) 
+ s.Linux.Resources.Devices = devPermissions + return nil +} + +func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { + var rlimits []specs.Rlimit + + ulimits := c.HostConfig.Ulimits + // Merge ulimits with daemon defaults + ulIdx := make(map[string]struct{}) + for _, ul := range ulimits { + ulIdx[ul.Name] = struct{}{} + } + for name, ul := range daemon.configStore.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + + for _, ul := range ulimits { + rlimits = append(rlimits, specs.Rlimit{ + Type: "RLIMIT_" + strings.ToUpper(ul.Name), + Soft: uint64(ul.Soft), + Hard: uint64(ul.Hard), + }) + } + + s.Process.Rlimits = rlimits + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + if err != nil { + return nil, err + } + return os.Open(fp) +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, nil, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, nil, err + } + passwdFile, err := readUserFile(c, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, nil, err + } + + // todo: fix this double read by a change to libcontainer/user pkg + groupFile, err = readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + var addGroups []int + if len(c.HostConfig.GroupAdd) > 0 { + addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) + if err != nil { + return 0, 0, nil, err + } + } + uid := uint32(execUser.Uid) + gid := uint32(execUser.Gid) + sgids := append(execUser.Sgids, addGroups...) + var additionalGids []uint32 + for _, g := range sgids { + additionalGids = append(additionalGids, uint32(g)) + } + return uid, gid, additionalGids, nil +} + +func setNamespace(s *specs.Spec, ns specs.Namespace) { + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + s.Linux.Namespaces[i] = ns + return + } + } + s.Linux.Namespaces = append(s.Linux.Namespaces, ns) +} + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities = caplist + return nil +} + +func delNamespace(s *specs.Spec, nsType specs.NamespaceType) { + idx := -1 + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + idx = i + } + } + if idx >= 0 { + s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) 
+ } +} + +func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { + userNS := false + // user + if c.HostConfig.UsernsMode.IsPrivate() { + uidMap, gidMap := daemon.GetUIDGIDMaps() + if uidMap != nil { + userNS = true + ns := specs.Namespace{Type: "user"} + setNamespace(s, ns) + s.Linux.UIDMappings = specMapping(uidMap) + s.Linux.GIDMappings = specMapping(gidMap) + } + } + // network + if !c.Config.NetworkDisabled { + ns := specs.Namespace{Type: "network"} + parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) + if userNS { + // to share a net namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.NetworkMode.IsHost() { + ns.Path = fmt.Sprintf("/proc/%d/ns/net", os.Getpid()) + } + setNamespace(s, ns) + } + // ipc + if c.HostConfig.IpcMode.IsContainer() { + ns := specs.Namespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share an IPC namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.IpcMode.IsHost() { + delNamespace(s, specs.NamespaceType("ipc")) + } else { + ns := specs.Namespace{Type: "ipc"} + setNamespace(s, ns) + } + // pid + if c.HostConfig.PidMode.IsHost() { + delNamespace(s, specs.NamespaceType("pid")) + } + // uts + if c.HostConfig.UTSMode.IsHost() { + delNamespace(s, specs.NamespaceType("uts")) + s.Hostname = "" + } + + return nil +} + +func specMapping(s []idtools.IDMap) []specs.IDMapping { + var ids []specs.IDMapping + for _, item := range s { + ids = append(ids, specs.IDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the source mount point of directory passed in as argument. Also return +// optional fields. +func getSourceMount(source string) (string, string, error) { + // Ensure any symlinks are resolved. + sourcePath, err := filepath.EvalSymlinks(source) + if err != nil { + return "", "", err + } + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, sourcePath) + if mountinfo != nil { + return sourcePath, mountinfo.Optional, nil + } + + path := sourcePath + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +// Ensure mount point on which path is mounted, is shared. 
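+// A mount counts as shared when the optional fields reported for it in the
+// mount table contain a "shared:<peer group>" entry.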
+func ensureShared(path string) error { + sharedMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + if !sharedMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + } + return nil +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string) error { + sharedMount := false + slaveMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } else if strings.HasPrefix(opt, "master:") { + slaveMount = true + break + } + } + + if !sharedMount && !slaveMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + } + return nil +} + +var ( + mountPropagationMap = map[string]int{ + "private": mount.PRIVATE, + "rprivate": mount.RPRIVATE, + "shared": mount.SHARED, + "rshared": mount.RSHARED, + "slave": mount.SLAVE, + "rslave": mount.RSLAVE, + } + + mountPropagationReverseMap = map[int]string{ + mount.PRIVATE: "private", + mount.RPRIVATE: "rprivate", + mount.SHARED: "shared", + mount.RSHARED: "rshared", + mount.SLAVE: "slave", + mount.RSLAVE: "rslave", + } +) + +func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { + userMounts := make(map[string]struct{}) + for _, m := range mounts { + userMounts[m.Destination] = struct{}{} + } + + // Filter out mounts that are overriden by user supplied mounts + var defaultMounts []specs.Mount + _, mountDev := userMounts["/dev"] + for _, m := range s.Mounts { + if _, ok := userMounts[m.Destination]; !ok { + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + continue + } + defaultMounts = append(defaultMounts, m) + } + } + + s.Mounts = defaultMounts + for _, m := range mounts { + for _, cm := range s.Mounts { + if cm.Destination == m.Destination { + return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + } + } + + if m.Source == "tmpfs" { + opt := []string{"noexec", "nosuid", "nodev", volume.DefaultPropagationMode} + if m.Data != "" { + opt = append(opt, strings.Split(m.Data, ",")...) + } else { + opt = append(opt, "size=65536k") + } + + s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: opt}) + continue + } + + mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} + + // Determine property of RootPropagation based on volume + // properties. If a volume is shared, then keep root propagation + // shared. This should work for slave and private volumes too. + // + // For slave volumes, it can be either [r]shared/[r]slave. + // + // For private volumes any root propagation value should work. 
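+		// A bind mount requesting shared propagation is honored only when its
+		// source already lives on a shared mount; slave propagation also
+		// accepts a "master:" (slave) source. The checks below enforce this
+		// and widen the rootfs propagation when needed.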
+ pFlag := mountPropagationMap[m.Propagation] + if pFlag == mount.SHARED || pFlag == mount.RSHARED { + if err := ensureShared(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] + } + } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + if err := ensureSharedOrSlave(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } + } + + opts := []string{"rbind"} + if !m.Writable { + opts = append(opts, "ro") + } + if pFlag != 0 { + opts = append(opts, mountPropagationReverseMap[pFlag]) + } + + mt.Options = opts + s.Mounts = append(s.Mounts, mt) + } + + if s.Root.Readonly { + for i, m := range s.Mounts { + switch m.Destination { + case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + continue + } + if _, ok := userMounts[m.Destination]; !ok { + if !stringutils.InSlice(m.Options, "ro") { + s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") + } + } + } + } + + if c.HostConfig.Privileged { + if !s.Root.Readonly { + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) + } + } + } + s.Linux.ReadonlyPaths = nil + s.Linux.MaskedPaths = nil + } + + // TODO: until a kernel/mount solution exists for handling remount in a user namespace, + // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) + if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + for i, m := range s.Mounts { + if m.Type == "cgroup" { + clearReadOnly(&s.Mounts[i]) + } + } + } + + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: c.BaseFS, + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Args = append([]string{c.Path}, c.Args...) 
+ s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + var cgroupsPath string + scopePrefix := "docker" + parent := "/docker" + useSystemd := UsingSystemd(daemon.configStore) + if useSystemd { + parent = "system.slice" + } + + if c.HostConfig.CgroupParent != "" { + parent = c.HostConfig.CgroupParent + } else if daemon.configStore.CgroupParent != "" { + parent = daemon.configStore.CgroupParent + } + + if useSystemd { + cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID + logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) + } else { + cgroupsPath = filepath.Join(parent, c.ID) + } + s.Linux.CgroupsPath = &cgroupsPath + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("linux runtime spec resources: %v", err) + } + s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj + if err := setDevices(&s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec devices: %v", err) + } + if err := setRlimits(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) + } + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("linux spec user: %v", err) + } + if err := setNamespaces(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux spec namespaces: %v", err) + } + if err := setCapabilities(&s, c); err != nil { + return nil, fmt.Errorf("linux spec capabilities: %v", err) + } + if err := setSeccomp(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux seccomp: %v", err) + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + mounts, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + mounts = append(mounts, c.IpcMounts()...) + mounts = append(mounts, c.TmpfsMounts()...) 
+ if err := setMounts(daemon, &s, c, mounts); err != nil { + return nil, fmt.Errorf("linux mounts: %v", err) + } + + //for _, ns := range s.Linux.Namespaces { + // if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { + // target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) + // if err != nil { + // return nil, err + // } + + // s.Hooks = specs.Hooks{ + // Prestart: []specs.Hook{{ + // Path: target, // FIXME: cross-platform + // Args: []string{"libnetwork-setkey", c.ID}, + // }}, + // } + // } + //} + + if apparmor.IsEnabled() { + appArmorProfile := "docker-default" + if len(c.AppArmorProfile) > 0 { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } + s.Process.ApparmorProfile = appArmorProfile + } + s.Process.SelinuxLabel = c.GetProcessLabel() + s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Linux.MountLabel = c.MountLabel + + return (*libcontainerd.Spec)(&s), nil +} + +func clearReadOnly(m *specs.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} diff --git a/vendor/github.com/docker/docker/daemon/pause.go b/vendor/github.com/docker/docker/daemon/pause.go new file mode 100644 index 00000000..dbfafbc5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/pause.go @@ -0,0 +1,49 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerPause(container); err != nil { + return err + } + + return nil +} + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. +func (daemon *Daemon) containerPause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot Pause the container which is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot Pause the container which is already paused + if container.Paused { + return fmt.Errorf("Container %s is already paused", container.ID) + } + + // We cannot Pause the container which is restarting + if container.Restarting { + return errContainerIsRestarting(container.ID) + } + + if err := daemon.containerd.Pause(container.ID); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/rename.go b/vendor/github.com/docker/docker/daemon/rename.go new file mode 100644 index 00000000..7989ad23 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/rename.go @@ -0,0 +1,65 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" +) + +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. 
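+// If any step fails after the new name has been reserved, the rename is
+// rolled back: the old name is re-reserved and the new reservation released.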
+func (daemon *Daemon) ContainerRename(oldName, newName string) error { + if oldName == "" || newName == "" { + return fmt.Errorf("Neither old nor new names may be empty") + } + + container, err := daemon.GetContainer(oldName) + if err != nil { + return err + } + + oldName = container.Name + + container.Lock() + defer container.Unlock() + if newName, err = daemon.reserveName(container.ID, newName); err != nil { + return fmt.Errorf("Error when allocating new name: %v", err) + } + + container.Name = newName + + defer func() { + if err != nil { + container.Name = oldName + daemon.reserveName(container.ID, oldName) + daemon.releaseName(newName) + } + }() + + daemon.releaseName(oldName) + if err = container.ToDisk(); err != nil { + return err + } + + attributes := map[string]string{ + "oldName": oldName, + } + + if !container.Running { + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + if e := container.ToDisk(); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/resize.go b/vendor/github.com/docker/docker/daemon/resize.go new file mode 100644 index 00000000..74735385 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/resize.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/libcontainerd" +) + +// ContainerResize changes the size of the TTY of the process running +// in the container with the given name to the given height and width. +func (daemon *Daemon) ContainerResize(name string, height, width int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + attributes := map[string]string{ + "height": fmt.Sprintf("%d", height), + "width": fmt.Sprintf("%d", width), + } + daemon.LogContainerEventWithAttributes(container, "resize", attributes) + } + return err +} + +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { + ec, err := daemon.getExecConfig(name) + if err != nil { + return err + } + return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) +} diff --git a/vendor/github.com/docker/docker/daemon/restart.go b/vendor/github.com/docker/docker/daemon/restart.go new file mode 100644 index 00000000..3779116c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/restart.go @@ -0,0 +1,48 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. 
+func (daemon *Daemon) ContainerRestart(name string, seconds int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if err := daemon.containerRestart(container, seconds); err != nil { + return fmt.Errorf("Cannot restart container %s: %v", name, err) + } + return nil +} + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. +func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if err := daemon.containerStop(container, seconds); err != nil { + return err + } + + if err := daemon.containerStart(container); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go new file mode 100644 index 00000000..620eee29 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go @@ -0,0 +1,12 @@ +// +build !seccomp,!windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/opencontainers/specs/specs-go" +) + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_linux.go b/vendor/github.com/docker/docker/daemon/seccomp_linux.go new file mode 100644 index 00000000..659a15de --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_linux.go @@ -0,0 +1,46 @@ +// +build linux,seccomp + +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/profiles/seccomp" + "github.com/opencontainers/specs/specs-go" +) + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + var profile *specs.Seccomp + var err error + + if c.HostConfig.Privileged { + return nil + } + + if !daemon.seccompEnabled { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") + } + logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") + c.SeccompProfile = "unconfined" + } + if c.SeccompProfile == "unconfined" { + return nil + } + if c.SeccompProfile != "" { + profile, err = seccomp.LoadProfile(c.SeccompProfile) + if err != nil { + return err + } + } else { + profile, err = seccomp.GetDefaultProfile() + if err != nil { + return err + } + } + + rs.Linux.Seccomp = profile + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/selinux_linux.go b/vendor/github.com/docker/docker/daemon/selinux_linux.go new file mode 100644 index 00000000..83a34471 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/selinux_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/opencontainers/runc/libcontainer/selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() 
+}
diff --git a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go
new file mode 100644
index 00000000..25a56ad1
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package daemon
+
+func selinuxSetDisabled() {
+}
+
+func selinuxFreeLxcContexts(label string) {
+}
+
+func selinuxEnabled() bool {
+	return false
+}
diff --git a/vendor/github.com/docker/docker/daemon/start.go b/vendor/github.com/docker/docker/daemon/start.go
new file mode 100644
index 00000000..1b34f426
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/start.go
@@ -0,0 +1,185 @@
+package daemon
+
+import (
+	"fmt"
+	"net/http"
+	"runtime"
+	"strings"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/errors"
+	"github.com/docker/docker/libcontainerd"
+	"github.com/docker/docker/runconfig"
+	containertypes "github.com/docker/engine-api/types/container"
+)
+
+// ContainerStart starts a container.
+func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if container.IsPaused() {
+		return fmt.Errorf("Cannot start a paused container, try unpause instead.")
+	}
+
+	if container.IsRunning() {
+		err := fmt.Errorf("Container already started")
+		return errors.NewErrorWithStatusCode(err, http.StatusNotModified)
+	}
+
+	// Windows does not have the backwards compatibility issue here.
+	if runtime.GOOS != "windows" {
+		// This is kept for backward compatibility - hostconfig should be passed when
+		// creating a container, not during start.
+		if hostConfig != nil {
+			logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and will be removed in Docker 1.12")
+			oldNetworkMode := container.HostConfig.NetworkMode
+			if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
+				return err
+			}
+			if err := daemon.setHostConfig(container, hostConfig); err != nil {
+				return err
+			}
+			newNetworkMode := container.HostConfig.NetworkMode
+			if string(oldNetworkMode) != string(newNetworkMode) {
+				// if the user has changed the network mode on start, clean up the
+				// old networks. This is a deprecated feature and will be removed in Docker 1.12
+				container.NetworkSettings.Networks = nil
+				if err := container.ToDisk(); err != nil {
+					return err
+				}
+			}
+			container.InitDNSHostConfig()
+		}
+	} else {
+		if hostConfig != nil {
+			return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create")
+		}
+	}
+
+	// Check that hostConfig is in line with the current system settings.
+	// It may happen that cgroups are unmounted, or the like.
+	if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil {
+		return err
+	}
+	// Adapt for old containers in case we have updates in this function and
+	// old containers never had a chance to call the new function at create stage.
+ if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { + return err + } + + return daemon.containerStart(container) +} + +// Start starts a container +func (daemon *Daemon) Start(container *container.Container) error { + return daemon.containerStart(container) +} + +// containerStart prepares the container to run by setting up everything the +// container needs, such as storage and networking, as well as links +// between containers. The container is left waiting for a signal to +// begin running. +func (daemon *Daemon) containerStart(container *container.Container) (err error) { + container.Lock() + defer container.Unlock() + + if container.Running { + return nil + } + + if container.RemovalInProgress || container.Dead { + return fmt.Errorf("Container is marked for removal and cannot be started.") + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.SetError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode == 0 { + container.ExitCode = 128 + } + container.ToDisk() + daemon.Cleanup(container) + attributes := map[string]string{ + "exitCode": fmt.Sprintf("%d", container.ExitCode), + } + daemon.LogContainerEventWithAttributes(container, "die", attributes) + } + }() + + if err := daemon.conditionalMountOnStart(container); err != nil { + return err + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. + container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + if err := daemon.initializeNetworking(container); err != nil { + return err + } + + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + if err := daemon.containerd.Create(container.ID, *spec, libcontainerd.WithRestartManager(container.RestartManager(true))); err != nil { + // if we receive an internal error from the initial start of a container then lets + // return it instead of entering the restart loop + // set to 127 for container cmd not found/does not exist) + if strings.Contains(err.Error(), "executable file not found") || + strings.Contains(err.Error(), "no such file or directory") || + strings.Contains(err.Error(), "system cannot find the file specified") { + container.ExitCode = 127 + err = fmt.Errorf("Container command '%s' not found or does not exist.", container.Path) + } + // set to 126 for container cmd can't be invoked errors + if strings.Contains(err.Error(), syscall.EACCES.Error()) { + container.ExitCode = 126 + err = fmt.Errorf("Container command '%s' could not be invoked.", container.Path) + } + + container.Reset(false) + + // start event is logged even on error + daemon.LogContainerEvent(container, "start") + return err + } + + return nil +} + +// Cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. 
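+//
+// Sketch of the intended call pattern (illustrative only, not part of
+// the vendored source): containerStart above defers this call so that a
+// failed start releases everything it had set up:
+//
+//	if err := daemon.containerStart(container); err != nil {
+//		daemon.Cleanup(container) // release network, mounts, exec commands
+//	}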
+func (daemon *Daemon) Cleanup(container *container.Container) { + daemon.releaseNetwork(container) + + container.UnmountIpcMounts(detachMounted) + + if err := daemon.conditionalUnmountOnCleanup(container); err != nil { + // FIXME: remove once reference counting for graphdrivers has been refactored + // Ensure that all the mounts are gone + if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + } + + for _, eConfig := range container.ExecCommands.Commands() { + daemon.unregisterExecCommand(container, eConfig) + } + + if container.BaseFS != "" { + if err := container.UnmountVolumes(false, daemon.LogVolumeEvent); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } + } + container.CancelAttachContext() +} diff --git a/vendor/github.com/docker/docker/daemon/stats.go b/vendor/github.com/docker/docker/daemon/stats.go new file mode 100644 index 00000000..cb3478cc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats.go @@ -0,0 +1,121 @@ +package daemon + +import ( + "encoding/json" + "errors" + "runtime" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/versions/v1p20" +) + +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(prefixOrName string, config *backend.ContainerStatsConfig) error { + if runtime.GOOS == "windows" { + return errors.New("Windows does not support stats") + } + // Remote API version (used for backwards compatibility) + apiVersion := version.Version(config.Version) + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + + // If the container is not running and requires no stream, return an empty stats. 
+ if !container.IsRunning() && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) + } + + outStream := config.OutStream + if config.Stream { + wf := ioutils.NewWriteFlusher(outStream) + defer wf.Close() + wf.Flush() + outStream = wf + } + + var preCPUStats types.CPUStats + getStatJSON := func(v interface{}) *types.StatsJSON { + ss := v.(types.StatsJSON) + ss.PreCPUStats = preCPUStats + // ss.MemoryStats.Limit = uint64(update.MemoryLimit) + preCPUStats = ss.CPUStats + return &ss + } + + enc := json.NewEncoder(outStream) + + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if apiVersion.LessThan("1.21") { + var ( + rxBytes uint64 + rxPackets uint64 + rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(statsJSON); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-config.Stop: + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go new file mode 100644 index 00000000..1f016322 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go @@ -0,0 +1,189 @@ +// +build !windows + +package daemon + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" + sysinfo "github.com/docker/docker/pkg/system" + "github.com/docker/engine-api/types" + "github.com/opencontainers/runc/libcontainer/system" +) + +type statsSupervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *container.Container) (*types.StatsJSON, error) +} + +// newStatsCollector returns a new statsCollector that collections +// network and cgroup stats for a registered container at the specified +// interval. The collector allows non-running containers to be added +// and will start processing stats when they are started. 
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + s := &statsCollector{ + interval: interval, + supervisor: daemon, + publishers: make(map[*container.Container]*pubsub.Publisher), + clockTicksPerSecond: uint64(system.GetClockTicks()), + bufReader: bufio.NewReaderSize(nil, 128), + } + meminfo, err := sysinfo.ReadMemInfo() + if err == nil && meminfo.MemTotal > 0 { + s.machineMemory = uint64(meminfo.MemTotal) + } + + go s.run() + return s +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { + m sync.Mutex + supervisor statsSupervisor + interval time.Duration + clockTicksPerSecond uint64 + publishers map[*container.Container]*pubsub.Publisher + bufReader *bufio.Reader + machineMemory uint64 +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +func (s *statsCollector) run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. + // it will grow enough in first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + if len(pairs) == 0 { + continue + } + + systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + for _, pair := range pairs { + stats, err := s.supervisor.GetContainerStats(pair.container) + if err != nil { + if _, ok := err.(errNotRunning); !ok { + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + } + continue + } + // FIXME: move to containerd + stats.CPUStats.SystemUsage = systemUsage + + pair.publisher.Publish(*stats) + } + } +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. 
+func (s *statsCollector) getSystemCPUUsage() (uint64, error) { + var line string + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + defer func() { + s.bufReader.Reset(nil) + f.Close() + }() + s.bufReader.Reset(f) + err = nil + for err == nil { + line, err = s.bufReader.ReadString('\n') + if err != nil { + break + } + parts := strings.Fields(line) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("invalid number of cpu fields") + } + var totalClockTicks uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) + } + totalClockTicks += v + } + return (totalClockTicks * nanoSecondsPerSecond) / + s.clockTicksPerSecond, nil + } + } + return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") +} diff --git a/vendor/github.com/docker/docker/daemon/stop.go b/vendor/github.com/docker/docker/daemon/stop.go new file mode 100644 index 00000000..70174300 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stop.go @@ -0,0 +1,65 @@ +package daemon + +import ( + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/errors" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. +func (daemon *Daemon) ContainerStop(name string, seconds int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if !container.IsRunning() { + err := fmt.Errorf("Container %s is already stopped", name) + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + if err := daemon.containerStop(container, seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %v", name, err) + } + return nil +} + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + stopSignal := container.StopSignal() + // 1. Send a stop signal + if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { + logrus.Infof("Failed to send signal %d to the process, force killing", stopSignal) + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + + // 2. Wait for the process to exit on its own + if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) + // 3. 
If it doesn't, then send SIGKILL
+		if err := daemon.Kill(container); err != nil {
+			container.WaitStop(-1 * time.Second)
+			logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
+		}
+	}
+
+	daemon.LogContainerEvent(container, "stop")
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/top_unix.go b/vendor/github.com/docker/docker/daemon/top_unix.go
new file mode 100644
index 00000000..5afa6024
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/top_unix.go
@@ -0,0 +1,85 @@
+// +build !windows
+
+package daemon
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/docker/containerd/subreaper/exec"
+	"github.com/docker/engine-api/types"
+)
+
+// ContainerTop lists the processes running inside the given
+// container by calling ps with the given args, or with the flags
+// "-ef" if no args are given. An error is returned if the container
+// is not found or not running, or if there is a problem running
+// ps or parsing its output.
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
+	if psArgs == "" {
+		psArgs = "-ef"
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if !container.IsRunning() {
+		return nil, errNotRunning{container.ID}
+	}
+
+	if container.IsRestarting() {
+		return nil, errContainerIsRestarting(container.ID)
+	}
+
+	pids, err := daemon.containerd.GetPidsForContainer(container.ID)
+	if err != nil {
+		return nil, err
+	}
+
+	output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output()
+	if err != nil {
+		return nil, fmt.Errorf("Error running ps: %v", err)
+	}
+
+	procList := &types.ContainerProcessList{}
+
+	lines := strings.Split(string(output), "\n")
+	procList.Titles = strings.Fields(lines[0])
+
+	pidIndex := -1
+	for i, name := range procList.Titles {
+		if name == "PID" {
+			pidIndex = i
+		}
+	}
+	if pidIndex == -1 {
+		return nil, fmt.Errorf("Couldn't find PID field in ps output")
+	}
+
+	// loop through the output and extract the PID from each line
+	for _, line := range lines[1:] {
+		if len(line) == 0 {
+			continue
+		}
+		fields := strings.Fields(line)
+		p, err := strconv.Atoi(fields[pidIndex])
+		if err != nil {
+			return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
+		}
+
+		for _, pid := range pids {
+			if pid == p {
+				// Make sure number of fields equals number of header titles
+				// merging "overhanging" fields
+				process := fields[:len(procList.Titles)-1]
+				process = append(process, strings.Join(fields[len(procList.Titles)-1:], " "))
+				procList.Processes = append(procList.Processes, process)
+			}
+		}
+	}
+	daemon.LogContainerEvent(container, "top")
+	return procList, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/unpause.go b/vendor/github.com/docker/docker/daemon/unpause.go
new file mode 100644
index 00000000..c1ab74b0
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/unpause.go
@@ -0,0 +1,43 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/container"
+)
+
+// ContainerUnpause unpauses a container.
+func (daemon *Daemon) ContainerUnpause(name string) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if err := daemon.containerUnpause(container); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// containerUnpause resumes the container's execution after it has been paused.
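+//
+// Illustrative call (assumes a *Daemon d; not part of the vendored
+// source):
+//
+//	if err := d.ContainerUnpause("web"); err != nil {
+//		logrus.Errorf("unpause failed: %v", err)
+//	}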
+func (daemon *Daemon) containerUnpause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot unpause the container which is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot unpause the container which is not paused + if !container.Paused { + return fmt.Errorf("Container %s is not paused", container.ID) + } + + if err := daemon.containerd.Resume(container.ID); err != nil { + return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/update.go b/vendor/github.com/docker/docker/daemon/update.go new file mode 100644 index 00000000..fee470a3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update.go @@ -0,0 +1,100 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/engine-api/types/container" +) + +// ContainerUpdate updates configuration of the container +func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) { + var warnings []string + + warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true) + if err != nil { + return warnings, err + } + + if err := daemon.update(name, hostConfig); err != nil { + return warnings, err + } + + return warnings, nil +} + +// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID. +func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error { + if len(cmd) == 0 { + return nil + } + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + c.Path = cmd[0] + c.Args = cmd[1:] + return nil +} + +func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { + if hostConfig == nil { + return nil + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + restoreConfig := false + backupHostConfig := *container.HostConfig + defer func() { + if restoreConfig { + container.Lock() + container.HostConfig = &backupHostConfig + container.ToDisk() + container.Unlock() + } + }() + + if container.RemovalInProgress || container.Dead { + return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) + } + + if container.IsRunning() && hostConfig.KernelMemory != 0 { + return errCannotUpdate(container.ID, fmt.Errorf("Can not update kernel memory to a running container, please stop it first.")) + } + + if err := container.UpdateContainer(hostConfig); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + + // if Restart Policy changed, we need to update container monitor + container.UpdateMonitor(hostConfig.RestartPolicy) + + // if container is restarting, wait 5 seconds until it's running + if container.IsRestarting() { + container.WaitRunning(5 * time.Second) + } + + // If container is not running, update hostConfig struct is enough, + // resources will be updated when the container is started again. + // If container is running (including paused), we need to update configs + // to the real world. 
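+	// For illustration (hypothetical caller; field names from the
+	// engine-api types/container package): raising the memory limit on a
+	// live container goes through this path:
+	//
+	//	warnings, err := d.ContainerUpdate("web", &container.HostConfig{
+	//		Resources: container.Resources{Memory: 512 * 1024 * 1024},
+	//	})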
+ if container.IsRunning() && !container.IsRestarting() { + if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + } + + daemon.LogContainerEvent(container, "update") + + return nil +} + +func errCannotUpdate(containerID string, err error) error { + return fmt.Errorf("Cannot update container %s: %v", containerID, err) +} diff --git a/vendor/github.com/docker/docker/daemon/update_linux.go b/vendor/github.com/docker/docker/daemon/update_linux.go new file mode 100644 index 00000000..97ba7c09 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_linux.go @@ -0,0 +1,25 @@ +// +build linux + +package daemon + +import ( + "github.com/docker/docker/libcontainerd" + "github.com/docker/engine-api/types/container" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + r.BlkioWeight = uint32(resources.BlkioWeight) + r.CpuShares = uint32(resources.CPUShares) + r.CpuPeriod = uint32(resources.CPUPeriod) + r.CpuQuota = uint32(resources.CPUQuota) + r.CpusetCpus = resources.CpusetCpus + r.CpusetMems = resources.CpusetMems + r.MemoryLimit = uint32(resources.Memory) + if resources.MemorySwap > 0 { + r.MemorySwap = uint32(resources.MemorySwap) + } + r.MemoryReservation = uint32(resources.MemoryReservation) + r.KernelMemoryLimit = uint32(resources.KernelMemory) + return r +} diff --git a/vendor/github.com/docker/docker/daemon/volumes.go b/vendor/github.com/docker/docker/daemon/volumes.go new file mode 100644 index 00000000..37f4e7fa --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes.go @@ -0,0 +1,178 @@ +package daemon + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not writable. + ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +) + +type mounts []container.Mount + +// volumeToAPIType converts a volume.Volume to the type used by the remote API +func volumeToAPIType(v volume.Volume) *types.Volume { + tv := &types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + Mountpoint: v.Path(), + } + if v, ok := v.(interface { + Labels() map[string]string + }); ok { + tv.Labels = v.Labels() + } + return tv +} + +// Len returns the number of mounts. Used in sorting. +func (m mounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m mounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m mounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. +func (m mounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. +// It follows the next sequence to decide what to mount in each final destination: +// +// 1. 
Select the previously configured mount points for the container, if any.
+// 2. Select the volumes mounted from other containers. These override previously configured mount point destinations.
+// 3. Select the bind mounts set by the client. These override previously configured mount point destinations.
+// 4. Cleanup old volumes that are about to be reassigned.
+func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	binds := map[string]bool{}
+	mountPoints := map[string]*volume.MountPoint{}
+
+	// 1. Read already configured mount points.
+	for name, point := range container.MountPoints {
+		mountPoints[name] = point
+	}
+
+	// 2. Read volumes from other containers.
+	for _, v := range hostConfig.VolumesFrom {
+		containerID, mode, err := volume.ParseVolumesFrom(v)
+		if err != nil {
+			return err
+		}
+
+		c, err := daemon.GetContainer(containerID)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range c.MountPoints {
+			cp := &volume.MountPoint{
+				Name:        m.Name,
+				Source:      m.Source,
+				RW:          m.RW && volume.ReadWrite(mode),
+				Driver:      m.Driver,
+				Destination: m.Destination,
+				Propagation: m.Propagation,
+				Named:       m.Named,
+			}
+
+			if len(cp.Source) == 0 {
+				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
+				if err != nil {
+					return err
+				}
+				cp.Volume = v
+			}
+
+			mountPoints[cp.Destination] = cp
+		}
+	}
+
+	// 3. Read bind mounts
+	for _, b := range hostConfig.Binds {
+		// #10618
+		bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver)
+		if err != nil {
+			return err
+		}
+
+		if binds[bind.Destination] {
+			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
+		}
+
+		if len(bind.Name) > 0 {
+			// create the volume
+			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
+			if err != nil {
+				return err
+			}
+			bind.Volume = v
+			bind.Source = v.Path()
+			// bind.Name is an already existing volume, we need to use that here
+			bind.Driver = v.DriverName()
+			bind.Named = true
+			if bind.Driver == "local" {
+				bind = setBindModeIfNull(bind)
+			}
+		}
+
+		if label.RelabelNeeded(bind.Mode) {
+			if err := label.Relabel(bind.Source, container.MountLabel, label.IsShared(bind.Mode)); err != nil {
+				return err
+			}
+		}
+		binds[bind.Destination] = true
+		mountPoints[bind.Destination] = bind
+	}
+
+	container.Lock()
+
+	// 4. Cleanup old volumes that are about to be reassigned.
+	for _, m := range mountPoints {
+		if m.BackwardsCompatible() {
+			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
+				daemon.volumes.Dereference(mp.Volume, container.ID)
+			}
+		}
+	}
+	container.MountPoints = mountPoints
+
+	container.Unlock()
+
+	return nil
+}
+
+// lazyInitializeVolume initializes a mountpoint's volume if needed.
+// This happens after a daemon restart.
+func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
+	if len(m.Driver) > 0 && m.Volume == nil {
+		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
+		if err != nil {
+			return err
+		}
+		m.Volume = v
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/volumes_unix.go b/vendor/github.com/docker/docker/daemon/volumes_unix.go
new file mode 100644
index 00000000..078fd10b
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/volumes_unix.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"sort"
+	"strconv"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/volume"
+)
+
+// setupMounts iterates through each of the mount points for a container and
+// calls Setup() on each. It also checks whether each mount is a network mount
+// (such as /etc/resolv.conf) and, if not, appends it to the array of mounts.
+func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
+	var mounts []container.Mount
+	for _, m := range c.MountPoints {
+		if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
+			return nil, err
+		}
+		path, err := m.Setup()
+		if err != nil {
+			return nil, err
+		}
+		if !c.TrySetNetworkMount(m.Destination, path) {
+			mnt := container.Mount{
+				Source:      path,
+				Destination: m.Destination,
+				Writable:    m.RW,
+				Propagation: m.Propagation,
+			}
+			if m.Volume != nil {
+				attributes := map[string]string{
+					"driver":      m.Volume.DriverName(),
+					"container":   c.ID,
+					"destination": m.Destination,
+					"read/write":  strconv.FormatBool(m.RW),
+					"propagation": m.Propagation,
+				}
+				daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes)
+			}
+			mounts = append(mounts, mnt)
+		}
+	}
+
+	mounts = sortMounts(mounts)
+	netMounts := c.NetworkMounts()
+	// if we are going to mount any of the network files from container
+	// metadata, the ownership must be set properly for potential container
+	// remapped root (user namespaces)
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+	for _, mount := range netMounts {
+		if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
+			return nil, err
+		}
+	}
+	return append(mounts, netMounts...), nil
+}
+
+// sortMounts sorts an array of mounts in lexicographic order. This ensures that
+// when mounting, the mounts don't shadow other mounts. For example, if mounting
+// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first.
+func sortMounts(m []container.Mount) []container.Mount {
+	sort.Sort(mounts(m))
+	return m
+}
+
+// setBindModeIfNull is platform specific processing to ensure the
+// shared mode is set to 'z' if it is null. This is called in the case
+// of processing a named volume and not a typical bind.
+func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint {
+	if bind.Mode == "" {
+		bind.Mode = "z"
+	}
+	return bind
+}
diff --git a/vendor/github.com/docker/docker/daemon/wait.go b/vendor/github.com/docker/docker/daemon/wait.go
new file mode 100644
index 00000000..52b335cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/wait.go
@@ -0,0 +1,17 @@
+package daemon
+
+import "time"
+
+// ContainerWait blocks until the given container has stopped. If the
+// container is not found, an error is returned. On a successful stop,
+// the exit code of the container is returned. On a timeout, an error
+// is returned. If you want to wait forever, supply a negative
+// duration for the timeout.
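+//
+// Illustrative calls (assume a *Daemon d; not part of the vendored
+// source):
+//
+//	exitCode, err := d.ContainerWait("web", 10*time.Second) // bounded wait
+//	exitCode, err = d.ContainerWait("web", -1*time.Second)  // wait forever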
+func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return -1, err + } + + return container.WaitStop(timeout) +} diff --git a/vendor/github.com/docker/docker/distribution/errors.go b/vendor/github.com/docker/docker/distribution/errors.go new file mode 100644 index 00000000..f7a9c621 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/errors.go @@ -0,0 +1,113 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.err.Error() +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. 
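+//
+// A hypothetical caller in a pull/push retry loop (illustration only;
+// attemptDownload is made up):
+//
+//	if err := attemptDownload(); err != nil {
+//		if _, ok := retryOnError(err).(xfer.DoNotRetry); ok {
+//			return err // permanent failure, stop retrying
+//		}
+//		// otherwise back off and try again
+//	}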
+func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return err +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go new file mode 100644 index 00000000..9f744d46 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/metadata.go @@ -0,0 +1,77 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + tempFilePath := path + ".tmp" + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(tempFilePath, value, 0644); err != nil { + return err + } + return os.Rename(tempFilePath, path) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. 
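+//
+// Illustrative use of the Store interface (made-up path and values; the
+// "v1id" namespace and "registry,v1ID" key shape match V1IDService below):
+//
+//	store, _ := NewFSMetadataStore("/var/lib/docker/image/aufs/distribution")
+//	_ = store.Set("v1id", "registry.example.com,abc123", []byte("sha256:deadbeef"))
+//	val, _ := store.Get("v1id", "registry.example.com,abc123")
+//	_ = store.Delete("v1id", "registry.example.com,abc123")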
+func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go new file mode 100644 index 00000000..f6e45892 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -0,0 +1,44 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. +func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go new file mode 100644 index 00000000..239cd1f4 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,137 @@ +package metadata + +import ( + "encoding/json" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. +type V2MetadataService struct { + store Store +} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. +func NewV2MetadataService(store Store) *V2MetadataService { + return &V2MetadataService{ + store: store, + } +} + +func (serv *V2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *V2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *V2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. 
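+//
+// Illustrative round trip (made-up digest and repository; assumes a
+// Store named store):
+//
+//	serv := NewV2MetadataService(store)
+//	_ = serv.Add(diffID, V2Metadata{Digest: dgst, SourceRepository: "docker.io/library/alpine"})
+//	entries, _ := serv.GetMetadata(diffID) // includes the entry just added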
+func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// Remove unassociates a metadata entry from a layer DiffID. +func (serv *V2MetadataService) Remove(metadata V2Metadata) error { + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/vendor/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go new file mode 100644 index 00000000..4b42371b --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull.go @@ -0,0 +1,205 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. 
+ ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService *registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore image.Store + // ReferenceStore manages tags. + ReferenceStore reference.Store + // DownloadManager manages concurrent pulls. + DownloadManager *xfer.LayerDownloadManager +} + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not empty or `scratch` + if err := validateRepoName(repoInfo.Name()); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if a ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. 
+ confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + // append subsequent errors + lastErr = err + } + logrus.Errorf("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return err + } + + imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + } + + return lastErr +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// validateRepoName validates the name of a repository. 
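+//
+// Expected behavior (as implemented below):
+//
+//	validateRepoName("")        // error: name can't be empty
+//	validateRepoName("scratch") // error: reserved name (api.NoBaseImageSpecifier)
+//	validateRepoName("alpine")  // nil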
+func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go new file mode 100644 index 00000000..86fad2ef --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v1.go @@ -0,0 +1,362 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) + + repoData, err := p.session.GetRepositoryData(p.repoInfo) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) + } + // Unexpected HTTP error + return err + } + + logrus.Debugf("Retrieving the tag list") + var tagsList map[string]string + tagged, isTagged := ref.(reference.NamedTagged) + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. 
+ lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. + for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Create(config) + if err != nil { + return err + } + + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld 
*v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go new file mode 100644 index 00000000..748ef422 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -0,0 +1,840 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +var errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. 
+func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService *metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + logrus.Errorf("Error trying v2 registry: %v", err) + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
+ layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService *metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + blobs := ld.repo.Blobs(ctx) + + layerDownload, err := blobs.Open(ctx, ld.digest) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + if err == distribution.ErrBlobUnknown { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debugf("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. 
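+ // A plain (non-DoNotRetry) error here lets the download manager retry;
+ // the partial temp file is kept so the next attempt can resume.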
+ _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier, err = digest.NewDigestVerifier(ld.digest) + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + // NOTE: not using TagService.Get, since it uses HEAD requests + // against the manifests endpoint, which are not supported by + // all registry versions. 
+ manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) + + var ( + imageID image.ID + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + oldTagImageID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagImageID == imageID { + return false, nil + } + } else if err != reference.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil { + return false, err + } + } else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil { + return false, err + } + + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil { + return "", "", err + } + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err = p.config.ImageStore.Create(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + imageID = image.ID(target.Digest) + if _, err := p.config.ImageStore.Get(imageID); err == nil { + // If the image already exists locally, no need to pull + // anything. + return imageID, manifestDigest, nil + } + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest) + if err != nil { + errChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.References() { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + var ( + configJSON []byte // raw serialized image config + unmarshalledConfig image.Image // deserialized image config + downloadRootFS image.RootFS // rootFS to use for registering layers. 
+ )
+ if runtime.GOOS == "windows" {
+ configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+ if err != nil {
+ return "", "", err
+ }
+ if unmarshalledConfig.RootFS == nil {
+ return "", "", errors.New("image config has no rootfs section")
+ }
+ downloadRootFS = *unmarshalledConfig.RootFS
+ downloadRootFS.DiffIDs = []layer.DiffID{}
+ } else {
+ downloadRootFS = *image.NewRootFS()
+ }
+
+ rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
+ if err != nil {
+ if configJSON != nil {
+ // Already received the config
+ return "", "", err
+ }
+ select {
+ case err = <-errChan:
+ return "", "", err
+ default:
+ cancel()
+ select {
+ case <-configChan:
+ case <-errChan:
+ }
+ return "", "", err
+ }
+ }
+ defer release()
+
+ if configJSON == nil {
+ configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+ if err != nil {
+ return "", "", err
+ }
+ }
+
+ // The DiffIDs returned in rootFS MUST match those in the config.
+ // Otherwise the image config could be referencing layers that aren't
+ // included in the manifest.
+ if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
+ return "", "", errRootFSMismatch
+ }
+
+ for i := range rootFS.DiffIDs {
+ if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
+ return "", "", errRootFSMismatch
+ }
+ }
+
+ imageID, err = p.config.ImageStore.Create(configJSON)
+ if err != nil {
+ return "", "", err
+ }
+
+ return imageID, manifestDigest, nil
+}
+
+func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
+ select {
+ case configJSON := <-configChan:
+ var unmarshalledConfig image.Image
+ if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
+ return nil, image.Image{}, err
+ }
+ return configJSON, unmarshalledConfig, nil
+ case err := <-errChan:
+ return nil, image.Image{}, err
+ // Don't need a case for ctx.Done in the select because cancellation
+ // will trigger an error in p.pullSchema2ImageConfig.
+ }
+}
+
+// pullManifestList handles "manifest lists" which point to various
+// platform-specific manifests.
+func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
+ manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
+ if err != nil {
+ return "", "", err
+ }
+
+ var manifestDigest digest.Digest
+ for _, manifestDescriptor := range mfstList.Manifests {
+ // TODO(aaronl): The manifest list spec supports optional
+ // "features" and "variant" fields. These are not yet used.
+ // Once they are, their values should be interpreted here.
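+ // For now, the first entry matching the daemon's GOARCH and GOOS is selected.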
+ if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + return "", "", errors.New("no supported platform found in manifest list") + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(ref, manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + imageID, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + imageID, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return imageID, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return "", err + } + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. 
If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return nil, err + } + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. + return errors.New("Invalid parent ID in the base layer of the image.") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. 
Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go new file mode 100644 index 00000000..9fbb875e --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/image" +) + +func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go new file mode 100644 index 00000000..52ee8e77 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push.go @@ -0,0 +1,219 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + // MetaHeaders store HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the push + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService *registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // LayerStore manages layers. + LayerStore layer.Store + // ImageStore manages images. + ImageStore image.Store + // ReferenceStore manages tags. + ReferenceStore reference.Store + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. 
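+//
+// A typical caller mirrors Push below: it constructs one pusher per
+// candidate endpoint and tries each in order, for example:
+//
+//	pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig)
+//	if err == nil {
+//		err = pusher.Push(ctx)
+//	}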
+func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on the repository named localName. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow to interrupt current push when new push of same image is done. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) + if len(associations) == 0 { + return fmt.Errorf("Repository does not exist: %s", repoInfo.Name()) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. 
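+ // The non-blocking receive on ctx.Done() below distinguishes cancellation
+ // from an ordinary endpoint failure, which may still fall back to another endpoint.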
+ select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Errorf("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/vendor/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go new file mode 100644 index 00000000..b6e4a130 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v1.go @@ -0,0 +1,454 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + 
)
+ client := registry.HTTPClient(tr)
+ v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
+ if err != nil {
+ logrus.Debugf("Could not get v1 endpoint: %v", err)
+ return fallbackError{err: err}
+ }
+ p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
+ if err != nil {
+ // TODO(dmcgowan): Check if should fallback
+ return fallbackError{err: err}
+ }
+ if err := p.pushRepository(ctx); err != nil {
+ // TODO(dmcgowan): Check if should fallback
+ return err
+ }
+ return nil
+}
+
+// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an
+// image being pushed to a v1 registry.
+type v1Image interface {
+ Config() []byte
+ Layer() layer.Layer
+ V1ID() string
+}
+
+type v1ImageCommon struct {
+ layer layer.Layer
+ config []byte
+ v1ID string
+}
+
+func (common *v1ImageCommon) Config() []byte {
+ return common.config
+}
+
+func (common *v1ImageCommon) V1ID() string {
+ return common.v1ID
+}
+
+func (common *v1ImageCommon) Layer() layer.Layer {
+ return common.layer
+}
+
+// v1TopImage defines a runnable (top layer) image being pushed to a v1
+// registry.
+type v1TopImage struct {
+ v1ImageCommon
+ imageID image.ID
+}
+
+func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) {
+ v1ID := digest.Digest(imageID).Hex()
+ parentV1ID := ""
+ if parent != nil {
+ parentV1ID = parent.V1ID()
+ }
+
+ config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false)
+ if err != nil {
+ return nil, err
+ }
+
+ return &v1TopImage{
+ v1ImageCommon: v1ImageCommon{
+ v1ID: v1ID,
+ config: config,
+ layer: l,
+ },
+ imageID: imageID,
+ }, nil
+}
+
+// v1DependencyImage defines a dependency layer being pushed to a v1 registry.
+type v1DependencyImage struct {
+ v1ImageCommon
+}
+
+func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) {
+ v1ID := digest.Digest(l.ChainID()).Hex()
+
+ config := ""
+ if parent != nil {
+ config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID())
+ } else {
+ config = fmt.Sprintf(`{"id":"%s"}`, v1ID)
+ }
+ return &v1DependencyImage{
+ v1ImageCommon: v1ImageCommon{
+ v1ID: v1ID,
+ config: []byte(config),
+ layer: l,
+ },
+ }, nil
+}
+
+// Retrieve all the images to be uploaded in the correct order
+func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) {
+ tagsByImage = make(map[image.ID][]string)
+
+ // Ignore digest references
+ if _, isCanonical := p.ref.(reference.Canonical); isCanonical {
+ return
+ }
+
+ tagged, isTagged := p.ref.(reference.NamedTagged)
+ if isTagged {
+ // Push a specific tag
+ var imgID image.ID
+ imgID, err = p.config.ReferenceStore.Get(p.ref)
+ if err != nil {
+ return
+ }
+
+ imageList, err = p.imageListForTag(imgID, nil, &referencedLayers)
+ if err != nil {
+ return
+ }
+
+ tagsByImage[imgID] = []string{tagged.Tag()}
+
+ return
+ }
+
+ imagesSeen := make(map[image.ID]struct{})
+ dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage)
+
+ associations := p.config.ReferenceStore.ReferencesByName(p.ref)
+ for _, association := range associations {
+ if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged {
+ // Ignore digest references.
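+ // v1 registries are not content-addressable, so digest references
+ // cannot be pushed and are skipped here.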
+ continue
+ }
+
+ tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag())
+
+ if _, present := imagesSeen[association.ImageID]; present {
+ // Skip generating image list for already-seen image
+ continue
+ }
+ imagesSeen[association.ImageID] = struct{}{}
+
+ imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ // append to main image list
+ imageList = append(imageList, imageListForThisTag...)
+ }
+ if len(imageList) == 0 {
+ return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag")
+ }
+ logrus.Debugf("Image list: %v", imageList)
+ logrus.Debugf("Tags by image: %v", tagsByImage)
+
+ return
+}
+
+func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) {
+ img, err := p.config.ImageStore.Get(imgID)
+ if err != nil {
+ return nil, err
+ }
+
+ topLayerID := img.RootFS.ChainID()
+
+ var l layer.Layer
+ if topLayerID == "" {
+ l = layer.EmptyLayer
+ } else {
+ l, err = p.config.LayerStore.Get(topLayerID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get top layer from image: %v", err)
+ }
+ *referencedLayers = append(*referencedLayers, l)
+ }
+
+ dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen)
+ if err != nil {
+ return nil, err
+ }
+
+ topImage, err := newV1TopImage(imgID, img, l, parent)
+ if err != nil {
+ return nil, err
+ }
+
+ imageListForThisTag = append(dependencyImages, topImage)
+
+ return
+}
+
+func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) {
+ if l == nil {
+ return nil, nil, nil
+ }
+
+ imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen)
+
+ if dependenciesSeen != nil {
+ if dependencyImage, present := dependenciesSeen[l.ChainID()]; present {
+ // This layer is already on the list, we can ignore it
+ // and all its parents.
+ return imageListForThisTag, dependencyImage, nil
+ }
+ }
+
+ dependencyImage, err := newV1DependencyImage(l, parent)
+ if err != nil {
+ return nil, nil, err
+ }
+ imageListForThisTag = append(imageListForThisTag, dependencyImage)
+
+ if dependenciesSeen != nil {
+ dependenciesSeen[l.ChainID()] = dependencyImage
+ }
+
+ return imageListForThisTag, dependencyImage, nil
+}
+
+// createImageIndex returns an index of an image's layer IDs and tags.
+func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData {
+ var imageIndex []*registry.ImgData
+ for _, img := range images {
+ v1ID := img.V1ID()
+
+ if topImage, isTopImage := img.(*v1TopImage); isTopImage {
+ if tags, hasTags := tags[topImage.imageID]; hasTags {
+ // If an image has tags you must add an entry in the image index
+ // for each tag
+ for _, tag := range tags {
+ imageIndex = append(imageIndex, &registry.ImgData{
+ ID: v1ID,
+ Tag: tag,
+ })
+ }
+ continue
+ }
+ }
+
+ // If the image does not have a tag it still needs to be sent to the
+ // registry with an empty tag so that it is associated with the repository
+ imageIndex = append(imageIndex, &registry.ImgData{
+ ID: v1ID,
+ Tag: "",
+ })
+ }
+ return imageIndex
+}
+
+// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
+// and, if it is absent, sends the image ID to the channel to be pushed.
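+// Each worker drains the images channel and signals the WaitGroup once the
+// channel is closed.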
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
+ defer wg.Done()
+ for image := range images {
+ v1ID := image.V1ID()
+ truncID := stringid.TruncateID(image.Layer().DiffID().String())
+ if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
+ logrus.Errorf("Error in LookupRemoteImage: %s", err)
+ imagesToPush <- v1ID
+ progress.Update(p.config.ProgressOutput, truncID, "Waiting")
+ } else {
+ progress.Update(p.config.ProgressOutput, truncID, "Already exists")
+ }
+ }
+}
+
+func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error {
+ workerCount := len(imageList)
+ // start a maximum of 5 workers to check if images exist on the specified endpoint.
+ if workerCount > 5 {
+ workerCount = 5
+ }
+ var (
+ wg = &sync.WaitGroup{}
+ imageData = make(chan v1Image, workerCount*2)
+ imagesToPush = make(chan string, workerCount*2)
+ pushes = make(chan map[string]struct{}, 1)
+ )
+ for i := 0; i < workerCount; i++ {
+ wg.Add(1)
+ go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
+ }
+ // start a goroutine that consumes the images to push
+ go func() {
+ shouldPush := make(map[string]struct{})
+ for id := range imagesToPush {
+ shouldPush[id] = struct{}{}
+ }
+ pushes <- shouldPush
+ }()
+ for _, v1Image := range imageList {
+ imageData <- v1Image
+ }
+ // close the channel to notify the workers that there will be no more images to check.
+ close(imageData)
+ wg.Wait()
+ close(imagesToPush)
+ // wait for all the images that require pushes to be collected into a consumable map.
+ shouldPush := <-pushes
+ // finish by pushing any images and tags to the endpoint. The order in which the images are pushed
+ // is very important, which is why we are still iterating over the ordered list of imageIDs.
+ for _, img := range imageList {
+ v1ID := img.V1ID()
+ if _, push := shouldPush[v1ID]; push {
+ if _, err := p.pushImage(ctx, img, endpoint); err != nil {
+ // FIXME: Continue on error?
+ return err
+ }
+ }
+ if topImage, isTopImage := img.(*v1TopImage); isTopImage {
+ for _, tag := range tags[topImage.imageID] {
+ progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag)
+ if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// pushRepository pushes layers that do not already exist on the registry.
+func (p *v1Pusher) pushRepository(ctx context.Context) error {
+ imgList, tags, referencedLayers, err := p.getImageList()
+ defer func() {
+ for _, l := range referencedLayers {
+ p.config.LayerStore.Release(l)
+ }
+ }()
+ if err != nil {
+ return err
+ }
+
+ imageIndex := createImageIndex(imgList, tags)
+ for _, data := range imageIndex {
+ logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
+ }
+
+ // Register all the images in a repository with the registry
+ // If an image is not in this list it will not be associated with the repository
+ repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil)
+ if err != nil {
+ return err
+ }
+ // push the repository to each of the endpoints only if it does not exist.
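+ // repoData.Endpoints comes from the index registration above; each
+ // endpoint receives the same ordered image list.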
+ for _, endpoint := range repoData.Endpoints {
+ if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil {
+ return err
+ }
+ }
+ _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints)
+ return err
+}
+
+func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) {
+ l := v1Image.Layer()
+ v1ID := v1Image.V1ID()
+ truncID := stringid.TruncateID(l.DiffID().String())
+
+ jsonRaw := v1Image.Config()
+ progress.Update(p.config.ProgressOutput, truncID, "Pushing")
+
+ // General rule is to use ID for graph accesses and compatibilityID for
+ // calls to session.registry()
+ imgData := &registry.ImgData{
+ ID: v1ID,
+ }
+
+ // Send the json
+ if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
+ if err == registry.ErrAlreadyExists {
+ progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping")
+ return "", nil
+ }
+ return "", err
+ }
+
+ arch, err := l.TarStream()
+ if err != nil {
+ return "", err
+ }
+ defer arch.Close()
+
+ // don't care if this fails; best effort
+ size, _ := l.DiffSize()
+
+ // Send the layer
+ logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size)
+
+ reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing")
+ defer reader.Close()
+
+ checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
+ if err != nil {
+ return "", err
+ }
+ imgData.Checksum = checksum
+ imgData.ChecksumPayload = checksumPayload
+ // Send the checksum
+ if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
+ return "", err
+ }
+
+ if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil {
+ logrus.Warnf("Could not set v1 ID mapping: %v", err)
+ }
+
+ progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed")
+ return imgData.Checksum, nil
+}
diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go
new file mode 100644
index 00000000..e86badb4
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/push_v2.go
@@ -0,0 +1,438 @@
+package distribution
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/digest"
+ "github.com/docker/distribution/manifest/schema1"
+ "github.com/docker/distribution/manifest/schema2"
+ distreference "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/client"
+ "github.com/docker/docker/distribution/metadata"
+ "github.com/docker/docker/distribution/xfer"
+ "github.com/docker/docker/image"
+ "github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/progress"
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/reference"
+ "github.com/docker/docker/registry"
+ "golang.org/x/net/context"
+)
+
+// PushResult contains the tag, manifest digest, and manifest size from the
+// push. It's used to signal this information to the trust code in the client
+// so it can sign the manifest if necessary.
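+// It is delivered to the client through progress.Aux once the manifest has
+// been uploaded.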
+type PushResult struct {
+ Tag string
+ Digest digest.Digest
+ Size int
+}
+
+type v2Pusher struct {
+ v2MetadataService *metadata.V2MetadataService
+ ref reference.Named
+ endpoint registry.APIEndpoint
+ repoInfo *registry.RepositoryInfo
+ config *ImagePushConfig
+ repo distribution.Repository
+
+ // pushState is state built by the Upload functions.
+ pushState pushState
+}
+
+type pushState struct {
+ sync.Mutex
+ // remoteLayers is the set of layers known to exist on the remote side.
+ // This avoids redundant queries when pushing multiple tags that
+ // involve the same layers. It is also used to fill in digest and size
+ // information when building the manifest.
+ remoteLayers map[layer.DiffID]distribution.Descriptor
+ // confirmedV2 is set to true if we confirm we're talking to a v2
+ // registry. This is used to limit fallbacks to the v1 protocol.
+ confirmedV2 bool
+}
+
+func (p *v2Pusher) Push(ctx context.Context) (err error) {
+ p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
+
+ p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
+ if err != nil {
+ logrus.Debugf("Error getting v2 registry: %v", err)
+ return err
+ }
+
+ if err = p.pushV2Repository(ctx); err != nil {
+ if continueOnError(err) {
+ return fallbackError{
+ err: err,
+ confirmedV2: p.pushState.confirmedV2,
+ transportOK: true,
+ }
+ }
+ }
+ return err
+}
+
+func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
+ if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
+ imageID, err := p.config.ReferenceStore.Get(p.ref)
+ if err != nil {
+ return fmt.Errorf("tag does not exist: %s", p.ref.String())
+ }
+
+ return p.pushV2Tag(ctx, namedTagged, imageID)
+ }
+
+ if !reference.IsNameOnly(p.ref) {
+ return errors.New("cannot push a digest reference")
+ }
+
+ // Push all tags
+ pushed := 0
+ for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
+ if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
+ pushed++
+ if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil {
+ return err
+ }
+ }
+ }
+
+ if pushed == 0 {
+ return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
+ }
+
+ return nil
+}
+
+func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error {
+ logrus.Debugf("Pushing repository: %s", ref.String())
+
+ img, err := p.config.ImageStore.Get(imageID)
+ if err != nil {
+ return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
+ }
+
+ var l layer.Layer
+
+ topLayerID := img.RootFS.ChainID()
+ if topLayerID == "" {
+ l = layer.EmptyLayer
+ } else {
+ l, err = p.config.LayerStore.Get(topLayerID)
+ if err != nil {
+ return fmt.Errorf("failed to get top layer from image: %v", err)
+ }
+ defer layer.ReleaseAndLog(p.config.LayerStore, l)
+ }
+
+ var descriptors []xfer.UploadDescriptor
+
+ descriptorTemplate := v2PushDescriptor{
+ v2MetadataService: p.v2MetadataService,
+ repoInfo: p.repoInfo,
+ repo: p.repo,
+ pushState: &p.pushState,
+ }
+
+ // Loop bounds condition is to avoid pushing the base layer on Windows.
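+ // Walk down from the top layer via l.Parent(), queueing one upload
+ // descriptor per DiffID.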
+ for i := 0; i < len(img.RootFS.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON()) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON()) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. + for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer layer.Layer + v2MetadataService *metadata.V2MetadataService + repoInfo reference.Named + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.repo.Named().Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + // Do we have any metadata associated with this layer's DiffID? 
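+ // Any digests previously recorded for this DiffID let us stat the remote
+ // blob, or mount it cross-repository, instead of re-uploading.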
+ v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState) + if err != nil { + progress.Update(progressOutput, pd.ID(), "Image push failed") + return distribution.Descriptor{}, retryOnError(err) + } + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = descriptor + pd.pushState.Unlock() + return descriptor, nil + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. + bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + mountAttemptsRemaining := 3 + + // Attempt to find another repository in the same registry to mount the layer + // from to avoid an unnecessary upload. + // Note: metadata is stored from oldest to newest, so we iterate through this + // slice in reverse to maximize our chances of the blob still existing in the + // remote repository. + for i := len(v2Metadata) - 1; i >= 0 && mountAttemptsRemaining > 0; i-- { + mountFrom := v2Metadata[i] + + sourceRepo, err := reference.ParseNamed(mountFrom.SourceRepository) + if err != nil { + continue + } + if pd.repoInfo.Hostname() != sourceRepo.Hostname() { + // don't mount blobs from another registry + continue + } + + namedRef, err := reference.WithName(mountFrom.SourceRepository) + if err != nil { + continue + } + + // TODO (brianbland): We need to construct a reference where the Name is + // only the full remote name, so clean this up when distribution has a + // richer reference package + remoteRef, err := distreference.WithName(namedRef.RemoteName()) + if err != nil { + continue + } + + canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest) + if err != nil { + continue + } + + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountFrom.Digest, sourceRepo.FullName()) + + layerUpload, err = bs.Create(ctx, client.WithMountFrom(canonicalRef)) + switch err := err.(type) { + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + case nil: + // blob upload session created successfully, so begin the upload + mountAttemptsRemaining = 0 + default: + // unable to mount layer from this repository, so this source mapping is no longer valid + logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository) + pd.v2MetadataService.Remove(mountFrom) + mountAttemptsRemaining-- + } + } + + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + arch, err := pd.layer.TarStream() + if err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + // don't care if this fails; best effort + size, _ := pd.layer.DiffSize() + + reader := 
progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
+	compressedReader, compressionDone := compress(reader)
+	defer func() {
+		reader.Close()
+		<-compressionDone
+	}()
+
+	digester := digest.Canonical.New()
+	tee := io.TeeReader(compressedReader, digester.Hash())
+
+	nn, err := layerUpload.ReadFrom(tee)
+	compressedReader.Close()
+	if err != nil {
+		return distribution.Descriptor{}, retryOnError(err)
+	}
+
+	pushDigest := digester.Digest()
+	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
+		return distribution.Descriptor{}, retryOnError(err)
+	}
+
+	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
+	progress.Update(progressOutput, pd.ID(), "Pushed")
+
+	// Cache mapping from this layer's DiffID to the blobsum
+	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
+		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
+	}
+
+	pd.pushState.Lock()
+
+	// If Commit succeeded, that's an indication that the remote registry
+	// speaks the v2 protocol.
+	pd.pushState.confirmedV2 = true
+
+	descriptor := distribution.Descriptor{
+		Digest:    pushDigest,
+		MediaType: schema2.MediaTypeLayer,
+		Size:      nn,
+	}
+	pd.pushState.remoteLayers[diffID] = descriptor
+
+	pd.pushState.Unlock()
+
+	return descriptor, nil
+}
+
+func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
+	pd.remoteDescriptor = descriptor
+}
+
+func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
+	return pd.remoteDescriptor
+}
+
+// layerAlreadyExists checks if the registry already knows about any of the
+// metadata passed in the "metadata" slice. If it finds one that the registry
+// knows about, it returns the known digest and "true".
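+// Otherwise it returns an empty descriptor and "false"; a non-nil error is
+// only returned for failures other than distribution.ErrBlobUnknown.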
+func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) { + for _, meta := range metadata { + // Only check blobsums that are known to this repository or have an unknown source + if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() { + continue + } + descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest) + switch err { + case nil: + descriptor.MediaType = schema2.MediaTypeLayer + return descriptor, true, nil + case distribution.ErrBlobUnknown: + // nop + default: + return distribution.Descriptor{}, false, err + } + } + return distribution.Descriptor{}, false, nil +} diff --git a/vendor/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go new file mode 100644 index 00000000..f768a14d --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/registry.go @@ -0,0 +1,130 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/docker/distribution" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +type dumbCredentialStore struct { + auth *types.AuthConfig +} + +func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { + return dcs.auth.Username, dcs.auth.Password +} + +func (dcs dumbCredentialStore) RefreshToken(*url.URL, string) string { + return dcs.auth.IdentityToken +} + +func (dcs dumbCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +// NewV2Repository returns a repository (v2 only). It creates a HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.FullName() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName() + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) 
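+	// The bare transport above (no auth handlers yet) is used to ping the
+	// registry; the resulting challenge manager records the authentication
+	// challenges that the token/basic handlers attached below respond to.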
+ + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + creds := dumbCredentialStore{auth: authConfig} + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{ + auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + }, + }, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repoNameRef, err := distreference.ParseNamed(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go new file mode 100644 index 00000000..739c427c --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -0,0 +1,430 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { + return &LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + } +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. 
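+	// Two descriptors that return the same key are assumed to describe the
+	// same layer data, so at most one of them is actually downloaded.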
+	Key() string
+	// ID returns the ID for display purposes.
+	ID() string
+	// DiffID should return the DiffID for this layer, or an error
+	// if it is unknown (for example, if it has not been downloaded
+	// before).
+	DiffID() (layer.DiffID, error)
+	// Download is called to perform the download.
+	Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error)
+	// Close is called when the download manager is finished with this
+	// descriptor and will not call Download again or read from the reader
+	// that Download returned.
+	Close()
+}
+
+// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an
+// additional Registered method which gets called after a downloaded layer is
+// registered. This allows the user of the download manager to know the DiffID
+// of each registered layer. This method is called if a cast to
+// DownloadDescriptorWithRegistered is successful.
+type DownloadDescriptorWithRegistered interface {
+	DownloadDescriptor
+	Registered(diffID layer.DiffID)
+}
+
+// Download is a blocking function which ensures the requested layers are
+// present in the layer store. It uses the string returned by the Key method to
+// deduplicate downloads. If a given layer is not already known to be present
+// in the layer store, and the key is not used by an in-progress download, the
+// Download method is called to get the layer tar data. Layers are then
+// registered in the appropriate order. The caller must call the returned
+// release function once it is done with the returned RootFS object.
+func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
+	var (
+		topLayer       layer.Layer
+		topDownload    *downloadTransfer
+		watcher        *Watcher
+		missingLayer   bool
+		transferKey    = ""
+		downloadsByKey = make(map[string]*downloadTransfer)
+	)
+
+	rootFS := initialRootFS
+	for _, descriptor := range layers {
+		key := descriptor.Key()
+		transferKey += key
+
+		if !missingLayer {
+			missingLayer = true
+			diffID, err := descriptor.DiffID()
+			if err == nil {
+				getRootFS := rootFS
+				getRootFS.Append(diffID)
+				l, err := ldm.layerStore.Get(getRootFS.ChainID())
+				if err == nil {
+					// Layer already exists.
+					logrus.Debugf("Layer already exists: %s", descriptor.ID())
+					progress.Update(progressOutput, descriptor.ID(), "Already exists")
+					if topLayer != nil {
+						layer.ReleaseAndLog(ldm.layerStore, topLayer)
+					}
+					topLayer = l
+					missingLayer = false
+					rootFS.Append(diffID)
+					continue
+				}
+			}
+		}
+
+		// Does this layer have the same data as a previous layer in
+		// the stack? If so, avoid downloading it more than once.
+		var topDownloadUncasted Transfer
+		if existingDownload, ok := downloadsByKey[key]; ok {
+			xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload)
+			defer topDownload.Transfer.Release(watcher)
+			topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
+			topDownload = topDownloadUncasted.(*downloadTransfer)
+			continue
+		}
+
+		// Layer is not known to exist - download and register it.
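+		// Note that transferKey is the concatenation of every key seen
+		// so far, so a transfer is shared only between downloads whose
+		// entire ancestor chains match, not just the topmost layer.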
+ progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. 
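+				// The non-blocking select below tells a
+				// cancellation-induced failure (which is final)
+				// apart from a transient error, which falls
+				// through to the retry logic further down.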
+ select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. 
+			select {
+			case <-d.Transfer.Context().Done():
+				d.err = errors.New("layer registration cancelled")
+				return
+			case <-sourceDownload.Done():
+			}
+
+			l, err = sourceDownload.result()
+			if err != nil {
+				d.err = err
+				return
+			}
+
+			layerReader, err := l.TarStream()
+			if err != nil {
+				d.err = err
+				return
+			}
+			defer layerReader.Close()
+
+			d.layer, err = d.layerStore.Register(layerReader, parentLayer)
+			if err != nil {
+				d.err = fmt.Errorf("failed to register layer: %v", err)
+				return
+			}
+
+			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
+			if hasRegistered {
+				withRegistered.Registered(d.layer.DiffID())
+			}
+
+			// Doesn't actually need to be its own goroutine, but
+			// done like this so we can defer close(c).
+			go func() {
+				<-d.Transfer.Released()
+				if d.layer != nil {
+					layer.ReleaseAndLog(d.layerStore, d.layer)
+				}
+			}()
+		}()
+
+		return d
+	}
+}
diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go
new file mode 100644
index 00000000..dd83f8b8
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/xfer/transfer.go
@@ -0,0 +1,392 @@
+package xfer
+
+import (
+	"runtime"
+	"sync"
+
+	"github.com/docker/docker/pkg/progress"
+	"golang.org/x/net/context"
+)
+
+// DoNotRetry is an error wrapper indicating that the error cannot be resolved
+// with a retry.
+type DoNotRetry struct {
+	Err error
+}
+
+// Error returns the stringified representation of the encapsulated error.
+func (e DoNotRetry) Error() string {
+	return e.Err.Error()
+}
+
+// Watcher is returned by Watch and can be passed to Release to stop watching.
+type Watcher struct {
+	// signalChan is used to signal to the watcher goroutine that
+	// new progress information is available, or that the transfer
+	// has finished.
+	signalChan chan struct{}
+	// releaseChan signals to the watcher goroutine that the watcher
+	// should be detached.
+	releaseChan chan struct{}
+	// running remains open as long as the watcher is watching the
+	// transfer. It gets closed if the transfer finishes or the
+	// watcher is detached.
+	running chan struct{}
+}
+
+// Transfer represents an in-progress transfer.
+type Transfer interface {
+	Watch(progressOutput progress.Output) *Watcher
+	Release(*Watcher)
+	Context() context.Context
+	Close()
+	Done() <-chan struct{}
+	Released() <-chan struct{}
+	Broadcast(masterProgressChan <-chan progress.Progress)
+}
+
+type transfer struct {
+	mu sync.Mutex
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// watchers keeps track of the goroutines monitoring progress output,
+	// indexed by the channels that release them.
+	watchers map[chan struct{}]*Watcher
+
+	// lastProgress is the most recently received progress event.
+	lastProgress progress.Progress
+	// hasLastProgress is true when lastProgress has been set.
+	hasLastProgress bool
+
+	// running remains open as long as the transfer is in progress.
+	running chan struct{}
+	// released stays open until all watchers release the transfer and
+	// the transfer is no longer tracked by the transfer manager.
+	released chan struct{}
+
+	// broadcastDone is true if the master progress channel has closed.
+	broadcastDone bool
+	// closed is true if Close has been called
+	closed bool
+	// broadcastSyncChan allows watchers to "ping" the broadcasting
+	// goroutine to wait for it to deplete its input channel. This ensures
+	// a detaching watcher won't miss an event that was sent before it
+	// started detaching.
+	broadcastSyncChan chan struct{}
+}
+
+// NewTransfer creates a new transfer.
+func NewTransfer() Transfer {
+	t := &transfer{
+		watchers:          make(map[chan struct{}]*Watcher),
+		running:           make(chan struct{}),
+		released:          make(chan struct{}),
+		broadcastSyncChan: make(chan struct{}),
+	}
+
+	// This uses context.Background instead of a caller-supplied context
+	// so that a transfer won't be cancelled automatically if the client
+	// which requested it is ^C'd (there could be other viewers).
+	t.ctx, t.cancel = context.WithCancel(context.Background())
+
+	return t
+}
+
+// Broadcast copies the progress and error output to all viewers.
+func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) {
+	for {
+		var (
+			p  progress.Progress
+			ok bool
+		)
+		select {
+		case p, ok = <-masterProgressChan:
+		default:
+			// We've depleted the channel, so now we can handle
+			// reads on broadcastSyncChan to let detaching watchers
+			// know we're caught up.
+			select {
+			case <-t.broadcastSyncChan:
+				continue
+			case p, ok = <-masterProgressChan:
+			}
+		}
+
+		t.mu.Lock()
+		if ok {
+			t.lastProgress = p
+			t.hasLastProgress = true
+			for _, w := range t.watchers {
+				select {
+				case w.signalChan <- struct{}{}:
+				default:
+				}
+			}
+		} else {
+			t.broadcastDone = true
+		}
+		t.mu.Unlock()
+		if !ok {
+			close(t.running)
+			return
+		}
+	}
+}
+
+// Watch adds a watcher to the transfer. The supplied progress.Output
+// receives progress updates from the transfer until it finishes.
+func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	w := &Watcher{
+		releaseChan: make(chan struct{}),
+		signalChan:  make(chan struct{}),
+		running:     make(chan struct{}),
+	}
+
+	t.watchers[w.releaseChan] = w
+
+	if t.broadcastDone {
+		close(w.running)
+		return w
+	}
+
+	go func() {
+		defer func() {
+			close(w.running)
+		}()
+		var (
+			done           bool
+			lastWritten    progress.Progress
+			hasLastWritten bool
+		)
+		for {
+			t.mu.Lock()
+			hasLastProgress := t.hasLastProgress
+			lastProgress := t.lastProgress
+			t.mu.Unlock()
+
+			// Make sure we don't write the last progress item
+			// twice.
+			if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
+				progressOutput.WriteProgress(lastProgress)
+				lastWritten = lastProgress
+				hasLastWritten = true
+			}
+
+			if done {
+				return
+			}
+
+			select {
+			case <-w.signalChan:
+			case <-w.releaseChan:
+				done = true
+				// Since the watcher is going to detach, make
+				// sure the broadcaster is caught up so we
+				// don't miss anything.
+				select {
+				case t.broadcastSyncChan <- struct{}{}:
+				case <-t.running:
+				}
+			case <-t.running:
+				done = true
+			}
+		}
+	}()
+
+	return w
+}
+
+// Release is the inverse of Watch, indicating that the watcher no longer wants
+// to be notified about the progress of the transfer. All calls to Watch must
+// be paired with later calls to Release so that the lifecycle of the transfer
+// is properly managed.
+func (t *transfer) Release(watcher *Watcher) {
+	t.mu.Lock()
+	delete(t.watchers, watcher.releaseChan)
+
+	if len(t.watchers) == 0 {
+		if t.closed {
+			// released may have been closed already if all
+			// watchers were released, then another one was added
+			// while waiting for a previous watcher goroutine to
+			// finish.
+ select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. +func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + for { + xfer, present := tm.transfers[key] + if !present { + break + } + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. 
+			xfer.Release(watcher)
+			tm.mu.Unlock()
+			// The goroutine that removes this transfer from the
+			// map is also waiting for xfer.Done(), so yield to it.
+			// This could be avoided by adding a Closed method
+			// to Transfer to allow explicitly waiting for it to be
+			// removed from the map, but forcing a scheduling round in
+			// this very rare case seems better than bloating the
+			// interface definition.
+			runtime.Gosched()
+			<-xfer.Done()
+			tm.mu.Lock()
+		default:
+			return xfer, watcher
+		}
+	}
+
+	start := make(chan struct{})
+	inactive := make(chan struct{})
+
+	if tm.activeTransfers < tm.concurrencyLimit {
+		close(start)
+		tm.activeTransfers++
+	} else {
+		tm.waitingTransfers = append(tm.waitingTransfers, start)
+	}
+
+	masterProgressChan := make(chan progress.Progress)
+	xfer := xferFunc(masterProgressChan, start, inactive)
+	watcher := xfer.Watch(progressOutput)
+	go xfer.Broadcast(masterProgressChan)
+	tm.transfers[key] = xfer
+
+	// When the transfer is finished, remove from the map.
+	go func() {
+		for {
+			select {
+			case <-inactive:
+				tm.mu.Lock()
+				tm.inactivate(start)
+				tm.mu.Unlock()
+				inactive = nil
+			case <-xfer.Done():
+				tm.mu.Lock()
+				if inactive != nil {
+					tm.inactivate(start)
+				}
+				delete(tm.transfers, key)
+				tm.mu.Unlock()
+				xfer.Close()
+				return
+			}
+		}
+	}()
+
+	return xfer, watcher
+}
+
+func (tm *transferManager) inactivate(start chan struct{}) {
+	// If the transfer was started, remove it from the activeTransfers
+	// count.
+	select {
+	case <-start:
+		// Start next transfer if any are waiting
+		if len(tm.waitingTransfers) != 0 {
+			close(tm.waitingTransfers[0])
+			tm.waitingTransfers = tm.waitingTransfers[1:]
+		} else {
+			tm.activeTransfers--
+		}
+	default:
+	}
+}
diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go
new file mode 100644
index 00000000..563824c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/xfer/upload.go
@@ -0,0 +1,163 @@
+package xfer
+
+import (
+	"errors"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/progress"
+	"golang.org/x/net/context"
+)
+
+const maxUploadAttempts = 5
+
+// LayerUploadManager provides task management and progress reporting for
+// uploads.
+type LayerUploadManager struct {
+	tm TransferManager
+}
+
+// NewLayerUploadManager returns a new LayerUploadManager.
+func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager {
+	return &LayerUploadManager{
+		tm: NewTransferManager(concurrencyLimit),
+	}
+}
+
+type uploadTransfer struct {
+	Transfer
+
+	remoteDescriptor distribution.Descriptor
+	err              error
+}
+
+// An UploadDescriptor references a layer that may need to be uploaded.
+type UploadDescriptor interface {
+	// Key returns the key used to deduplicate uploads.
+	Key() string
+	// ID returns the ID for display purposes.
+	ID() string
+	// DiffID should return the DiffID for this layer.
+	DiffID() layer.DiffID
+	// Upload is called to perform the upload.
+	Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
+	// SetRemoteDescriptor provides the distribution.Descriptor that was
+	// returned by Upload. This descriptor is not to be confused with
+	// the UploadDescriptor interface, which is used for internally
+	// identifying layers that are being uploaded.
+ SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. +func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/vendor/github.com/docker/docker/docker/README.md b/vendor/github.com/docker/docker/docker/README.md new file mode 100644 index 00000000..015bc133 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker's main function. + +This file provides first line CLI argument parsing and environment variable setting. 
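[editor's note] For orientation, a minimal, illustrative driver for the TransferManager vendored above; it is not part of this commit. It substitutes a toy DoFunc for the real ones produced by makeDownloadFunc/makeUploadFunc, and assumes the vendored import paths from this diff plus the exported ID/Action fields of progress.Progress:

package main

import (
	"fmt"

	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/pkg/progress"
)

func main() {
	// Drain progress events delivered to the watcher.
	displayChan := make(chan progress.Progress)
	drained := make(chan struct{})
	go func() {
		for p := range displayChan {
			fmt.Println(p.ID, p.Action)
		}
		close(drained)
	}()

	// DoFunc: returns immediately (non-blocking); the goroutine waits for
	// a concurrency slot (start), emits one progress event, and closes its
	// master progress channel, which ends the transfer.
	doFunc := func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) xfer.Transfer {
		t := xfer.NewTransfer()
		go func() {
			defer close(progressChan)
			<-start
			progress.Update(progress.ChanOutput(progressChan), "layer0", "Working")
		}()
		return t
	}

	tm := xfer.NewTransferManager(2)
	// A second Transfer call with the same key would join this transfer
	// instead of starting a new one.
	t, watcher := tm.Transfer("layer0-key", doFunc, progress.ChanOutput(displayChan))
	<-t.Done()
	t.Release(watcher) // blocks until the watcher goroutine finishes
	close(displayChan)
	<-drained
}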
diff --git a/vendor/github.com/docker/docker/docker/client.go b/vendor/github.com/docker/docker/docker/client.go new file mode 100644 index 00000000..487d4458 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/client.go @@ -0,0 +1,33 @@ +package docker + +import ( + "path/filepath" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" +) + +var clientFlags = &cli.ClientFlags{FlagSet: new(flag.FlagSet), Common: commonFlags} + +func init() { + client := clientFlags.FlagSet + client.StringVar(&clientFlags.ConfigDir, []string{"-config"}, cliconfig.ConfigDir(), "Location of client config files") + + clientFlags.PostParse = func() { + clientFlags.Common.PostParse() + + if clientFlags.ConfigDir != "" { + cliconfig.SetConfigDir(clientFlags.ConfigDir) + } + + if clientFlags.Common.TrustKey == "" { + clientFlags.Common.TrustKey = filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile) + } + + if clientFlags.Common.Debug { + utils.EnableDebug() + } + } +} diff --git a/vendor/github.com/docker/docker/docker/common.go b/vendor/github.com/docker/docker/docker/common.go new file mode 100644 index 00000000..1615ef25 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/common.go @@ -0,0 +1,100 @@ +package docker + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTrustKeyFile = "key.json" + defaultCaFile = "ca.pem" + defaultKeyFile = "key.pem" + defaultCertFile = "cert.pem" + tlsVerifyKey = "tlsverify" +) + +var ( + commonFlags = &cli.CommonFlags{FlagSet: new(flag.FlagSet)} + + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +func init() { + if dockerCertPath == "" { + dockerCertPath = cliconfig.ConfigDir() + } + + commonFlags.PostParse = postParseCommon + + cmd := commonFlags.FlagSet + + cmd.BoolVar(&commonFlags.Debug, []string{"D", "-debug"}, false, "Enable debug mode") + cmd.StringVar(&commonFlags.LogLevel, []string{"l", "-log-level"}, "info", "Set the logging level") + cmd.BoolVar(&commonFlags.TLS, []string{"-tls"}, false, "Use TLS; implied by --tlsverify") + cmd.BoolVar(&commonFlags.TLSVerify, []string{"-tlsverify"}, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file") + + var tlsOptions tlsconfig.Options + commonFlags.TLSOptions = &tlsOptions + cmd.StringVar(&tlsOptions.CAFile, []string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust certs signed only by this CA") + cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") + cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") + + cmd.Var(opts.NewNamedListOptsRef("hosts", &commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") +} + +func postParseCommon() { + cmd := commonFlags.FlagSet + + setDaemonLogLevel(commonFlags.LogLevel) + + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on tls + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, 
so we need to check that here as well + if cmd.IsSet("-"+tlsVerifyKey) || commonFlags.TLSVerify { + commonFlags.TLS = true + } + + if !commonFlags.TLS { + commonFlags.TLSOptions = nil + } else { + tlsOptions := commonFlags.TLSOptions + tlsOptions.InsecureSkipVerify = !commonFlags.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. + if !cmd.IsSet("-tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !cmd.IsSet("-tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +func setDaemonLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/vendor/github.com/docker/docker/docker/daemon.go b/vendor/github.com/docker/docker/docker/daemon.go new file mode 100644 index 00000000..7a0b53f4 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/daemon.go @@ -0,0 +1,417 @@ +// +build daemon + +package docker + +import ( + "crypto/tls" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + "github.com/docker/docker/api/server/router/container" + "github.com/docker/docker/api/server/router/image" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/docker/listeners" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + daemonUsage = " docker daemon [ --help | ... ]\n" + daemonConfigFileFlag = "-config-file" +) + +var ( + daemonCli cli.Handler = NewDaemonCli() +) + +// DaemonCli represents the daemon CLI. +type DaemonCli struct { + *daemon.Config + flags *flag.FlagSet +} + +func presentInHelp(usage string) string { return usage } +func absentFromHelp(string) string { return "" } + +// NewDaemonCli returns a pre-configured daemon CLI +func NewDaemonCli() *DaemonCli { + daemonFlags := cli.Subcmd("daemon", nil, "Enable daemon mode", true) + + // TODO(tiborvass): remove InstallFlags? 
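+	// Note that the daemon's config flags are installed twice below: into
+	// the daemon subcommand's own flag set (shown in its help output) and
+	// into the global flag set (hidden from help via absentFromHelp).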
+	daemonConfig := new(daemon.Config)
+	daemonConfig.LogConfig.Config = make(map[string]string)
+	daemonConfig.ClusterOpts = make(map[string]string)
+
+	if runtime.GOOS != "linux" {
+		daemonConfig.V2Only = true
+	}
+
+	daemonConfig.InstallFlags(daemonFlags, presentInHelp)
+	daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp)
+	daemonFlags.Require(flag.Exact, 0)
+
+	return &DaemonCli{
+		Config: daemonConfig,
+		flags:  daemonFlags,
+	}
+}
+
+func migrateKey() (err error) {
+	// Migrate the trust key if it exists at ~/.docker/key.json and is
+	// owned by the current user
+	oldPath := filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile)
+	newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
+	if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) {
+		defer func() {
+			// Ensure old path is removed if no error occurred
+			if err == nil {
+				err = os.Remove(oldPath)
+			} else {
+				logrus.Warnf("Key migration failed, key file not removed at %s", oldPath)
+				os.Remove(newPath)
+			}
+		}()
+
+		if err := system.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil {
+			return fmt.Errorf("Unable to create daemon configuration directory: %s", err)
+		}
+
+		newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+		if err != nil {
+			return fmt.Errorf("error creating key file %q: %s", newPath, err)
+		}
+		defer newFile.Close()
+
+		oldFile, err := os.Open(oldPath)
+		if err != nil {
+			return fmt.Errorf("error opening key file %q: %s", oldPath, err)
+		}
+		defer oldFile.Close()
+
+		if _, err := io.Copy(newFile, oldFile); err != nil {
+			return fmt.Errorf("error copying key: %s", err)
+		}
+
+		logrus.Infof("Migrated key from %s to %s", oldPath, newPath)
+	}
+
+	return nil
+}
+
+func getGlobalFlag() (globalFlag *flag.Flag) {
+	defer func() {
+		if x := recover(); x != nil {
+			switch f := x.(type) {
+			case *flag.Flag:
+				globalFlag = f
+			default:
+				panic(x)
+			}
+		}
+	}()
+	visitor := func(f *flag.Flag) { panic(f) }
+	commonFlags.FlagSet.Visit(visitor)
+	clientFlags.FlagSet.Visit(visitor)
+	return
+}
+
+// CmdDaemon is the daemon command, called with the raw arguments after
+// `docker daemon`.
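+// It parses the daemon flags, merges in the optional configuration file,
+// sets up TLS and a listener for every configured host, then serves the API
+// until the server exits or the process is signalled.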
+func (cli *DaemonCli) CmdDaemon(args ...string) error { + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + if !commonFlags.FlagSet.IsEmpty() || !clientFlags.FlagSet.IsEmpty() { + // deny `docker -D daemon` + illegalFlag := getGlobalFlag() + fmt.Fprintf(os.Stderr, "invalid flag '-%s'.\nSee 'docker daemon --help'.\n", illegalFlag.Names[0]) + os.Exit(1) + } else { + // allow new form `docker daemon -D` + flag.Merge(cli.flags, commonFlags.FlagSet) + } + + configFile := cli.flags.String([]string{daemonConfigFileFlag}, defaultDaemonConfigFile, "Daemon configuration file") + + cli.flags.ParseFlags(args, true) + commonFlags.PostParse() + + if commonFlags.TrustKey == "" { + commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), defaultTrustKeyFile) + } + cliConfig, err := loadDaemonCliConfig(cli.Config, cli.flags, commonFlags, *configFile) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + cli.Config = cliConfig + + if cli.Config.Debug { + utils.EnableDebug() + } + + if utils.ExperimentalBuild() { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + logrus.Fatalf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + logrus.Fatalf("Failed to set log opts: %v", err) + } + } + + var pfile *pidfile.PIDFile + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + logrus.Fatalf("Error starting daemon: %v", err) + } + pfile = pf + defer func() { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + serverConfig := &apiserver.Config{ + AuthorizationPluginNames: cli.Config.AuthorizationPlugins, + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + } + serverConfig = setPlatformServerConfig(serverConfig, cli.Config) + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + logrus.Fatal(err) + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + api := apiserver.New(serverConfig) + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = opts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + logrus.Fatalf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + logrus.Fatalf("bad format %s, expected PROTO://ADDR", protoAddr) + } + l, err := listeners.Init(protoAddrParts[0], protoAddrParts[1], serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + logrus.Fatal(err) + } + + logrus.Debugf("Listener created for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) + api.Accept(protoAddrParts[1], l...) 
+	}
+
+	if err := migrateKey(); err != nil {
+		logrus.Fatal(err)
+	}
+	cli.TrustKeyPath = commonFlags.TrustKey
+
+	registryService := registry.NewService(cli.Config.ServiceOptions)
+
+	containerdRemote, err := libcontainerd.New(filepath.Join(cli.Config.ExecRoot, "libcontainerd"), cli.getPlatformRemoteOptions()...)
+	if err != nil {
+		logrus.Fatal(err)
+	}
+
+	d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote)
+	if err != nil {
+		if pfile != nil {
+			if err := pfile.Remove(); err != nil {
+				logrus.Error(err)
+			}
+		}
+		logrus.Fatalf("Error starting daemon: %v", err)
+	}
+
+	logrus.Info("Daemon has completed initialization")
+
+	logrus.WithFields(logrus.Fields{
+		"version":     dockerversion.Version,
+		"commit":      dockerversion.GitCommit,
+		"graphdriver": d.GraphDriverName(),
+	}).Info("Docker daemon")
+
+	initRouter(api, d)
+
+	reload := func(config *daemon.Config) {
+		if err := d.Reload(config); err != nil {
+			logrus.Errorf("Error reconfiguring the daemon: %v", err)
+			return
+		}
+		if config.IsValueSet("debug") {
+			debugEnabled := utils.IsDebugEnabled()
+			switch {
+			case debugEnabled && !config.Debug: // disable debug
+				utils.DisableDebug()
+				api.DisableProfiler()
+			case config.Debug && !debugEnabled: // enable debug
+				utils.EnableDebug()
+				api.EnableProfiler()
+			}
+
+		}
+	}
+
+	setupConfigReloadTrap(*configFile, cli.flags, reload)
+
+	// The serve API routine never exits unless an error occurs.
+	// We need to start it as a goroutine and wait on it so the
+	// daemon doesn't exit.
+	serveAPIWait := make(chan error)
+	go api.Wait(serveAPIWait)
+
+	signal.Trap(func() {
+		api.Close()
+		<-serveAPIWait
+		shutdownDaemon(d, 15)
+		if pfile != nil {
+			if err := pfile.Remove(); err != nil {
+				logrus.Error(err)
+			}
+		}
+	})
+
+	// After the daemon is done setting up, we can notify the systemd API.
+	notifySystem()
+
+	// Daemon is fully initialized and handling API traffic.
+	// Wait for serve API to complete.
+	errAPI := <-serveAPIWait
+	shutdownDaemon(d, 15)
+	containerdRemote.Cleanup()
+	if errAPI != nil {
+		if pfile != nil {
+			if err := pfile.Remove(); err != nil {
+				logrus.Error(err)
+			}
+		}
+		logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI)
+	}
+	return nil
+}
+
+// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
+// d.Shutdown() waits too long to kill containers or, worse, is blocked there.
+func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
+	ch := make(chan struct{})
+	go func() {
+		d.Shutdown()
+		close(ch)
+	}()
+	select {
+	case <-ch:
+		logrus.Debug("Clean shutdown succeeded")
+	case <-time.After(timeout * time.Second):
+		logrus.Error("Force shutdown daemon")
+	}
+}
+
+func loadDaemonCliConfig(config *daemon.Config, daemonFlags *flag.FlagSet, commonConfig *cli.CommonFlags, configFile string) (*daemon.Config, error) {
+	config.Debug = commonConfig.Debug
+	config.Hosts = commonConfig.Hosts
+	config.LogLevel = commonConfig.LogLevel
+	config.TLS = commonConfig.TLS
+	config.TLSVerify = commonConfig.TLSVerify
+	config.CommonTLSOptions = daemon.CommonTLSOptions{}
+
+	if commonConfig.TLSOptions != nil {
+		config.CommonTLSOptions.CAFile = commonConfig.TLSOptions.CAFile
+		config.CommonTLSOptions.CertFile = commonConfig.TLSOptions.CertFile
+		config.CommonTLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile
+	}
+
+	if configFile != "" {
+		c, err := daemon.MergeDaemonConfigurations(config, daemonFlags, configFile)
+		if err != nil {
+			if daemonFlags.IsSet(daemonConfigFileFlag) || !os.IsNotExist(err) {
+				return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", configFile, err)
+			}
+		}
+		// The merged configuration can be nil if the config file didn't exist.
+		// Leave the current configuration as it is when that happens.
+		if c != nil {
+			config = c
+		}
+	}
+
+	// Regardless of whether the user sets it to true or false, if they
+	// specify TLSVerify at all then we need to turn on TLS
+	if config.IsValueSet(tlsVerifyKey) {
+		config.TLS = true
+	}
+
+	// ensure that the log level is the one set after merging configurations
+	setDaemonLogLevel(config.LogLevel)
+
+	return config, nil
+}
+
+func initRouter(s *apiserver.Server, d *daemon.Daemon) {
+	routers := []router.Router{
+		container.NewRouter(d),
+		image.NewRouter(d),
+		systemrouter.NewRouter(d),
+		volume.NewRouter(d),
+		build.NewRouter(dockerfile.NewBuildManager(d)),
+	}
+
+	s.InitRouter(utils.IsDebugEnabled(), routers...)
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_freebsd.go b/vendor/github.com/docker/docker/docker/daemon_freebsd.go
new file mode 100644
index 00000000..013f0e91
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_freebsd.go
@@ -0,0 +1,7 @@
+// +build daemon
+
+package docker
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_linux.go b/vendor/github.com/docker/docker/docker/daemon_linux.go
new file mode 100644
index 00000000..3ff9a49e
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_linux.go
@@ -0,0 +1,13 @@
+// +build daemon
+
+package docker
+
+import (
+	systemdDaemon "github.com/coreos/go-systemd/daemon"
+)
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+	// Tell the init daemon we are accepting requests
+	go systemdDaemon.SdNotify("READY=1")
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_none.go b/vendor/github.com/docker/docker/docker/daemon_none.go
new file mode 100644
index 00000000..b2cbfd41
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_none.go
@@ -0,0 +1,13 @@
+// +build !daemon
+
+package docker
+
+import "github.com/docker/docker/cli"
+
+const daemonUsage = ""
+
+var daemonCli cli.Handler
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_unix.go b/vendor/github.com/docker/docker/docker/daemon_unix.go
new file mode 100644
index 00000000..8e212f8e
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_unix.go
@@ -0,0 +1,82 @@
+// +build daemon,!windows
+
+package docker
+
+import (
+	"fmt"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	apiserver "github.com/docker/docker/api/server"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/libcontainerd"
+	"github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/system"
+)
+
+const defaultDaemonConfigFile = "/etc/docker/daemon.json"
+
+func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config {
+	serverConfig.EnableCors = daemonCfg.EnableCors
+	serverConfig.CorsHeaders = daemonCfg.CorsHeaders
+
+	return serverConfig
+}
+
+// currentUserIsOwner checks whether the current user is the owner of the given
+// file.
+func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by a custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir() string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the SIGHUP signal to reload the configuration. +func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + go func() { + for range c { + if err := daemon.ReloadConfiguration(configFile, flags, reload); err != nil { + logrus.Error(err) + } + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{ + libcontainerd.WithDebugLog(cli.Config.Debug), + } + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + if daemon.UsingSystemd(cli.Config) { + args := []string{"--systemd-cgroup=true"} + opts = append(opts, libcontainerd.WithRuntimeArgs(args)) + } + return opts +} diff --git a/vendor/github.com/docker/docker/docker/docker.go b/vendor/github.com/docker/docker/docker/docker.go new file mode 100644 index 00000000..cbb1e4cf --- /dev/null +++ b/vendor/github.com/docker/docker/docker/docker.go @@ -0,0 +1,77 @@ +package docker + +import ( + "fmt" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/client" + "github.com/docker/docker/cli" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" +) + +func Main() { + // Set terminal emulation based on platform as required. + stdin, stdout, stderr := term.StdStreams() + + logrus.SetOutput(stderr) + + flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet) + + flag.Usage = func() { + fmt.Fprint(stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ --help | -v | --version ]\n\n") + fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") + + flag.CommandLine.SetOutput(stdout) + flag.PrintDefaults() + + help := "\nCommands:\n" + + for _, cmd := range dockerCommands { + help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description) + } + + help += "\nRun 'docker COMMAND --help' for more information on a command." + fmt.Fprintf(stdout, "%s\n", help) + } + + flag.Parse() + + if *flVersion { + showVersion() + return + } + + if *flHelp { + // if global flag --help is present, regardless of what other options and commands there are, + // just print the usage. 
+ flag.Usage() + return + } + + clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) + + c := cli.New(clientCli, daemonCli) + if err := c.Run(flag.Args()...); err != nil { + if sterr, ok := err.(cli.StatusError); ok { + if sterr.Status != "" { + fmt.Fprintln(stderr, sterr.Status) + os.Exit(1) + } + os.Exit(sterr.StatusCode) + } + fmt.Fprintln(stderr, err) + os.Exit(1) + } +} + +func showVersion() { + if utils.ExperimentalBuild() { + fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit) + } else { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) + } +} diff --git a/vendor/github.com/docker/docker/docker/flags.go b/vendor/github.com/docker/docker/docker/flags.go new file mode 100644 index 00000000..fdff7b47 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/flags.go @@ -0,0 +1,30 @@ +package docker + +import ( + "sort" + + "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") + flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") +) + +type byName []cli.Command + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } + +var dockerCommands []cli.Command + +// TODO(tiborvass): do not show 'daemon' on client-only binaries + +func init() { + for _, cmd := range cli.DockerCommands { + dockerCommands = append(dockerCommands, cmd) + } + sort.Sort(byName(dockerCommands)) +} diff --git a/vendor/github.com/docker/docker/docker/listeners/listeners.go b/vendor/github.com/docker/docker/docker/listeners/listeners.go new file mode 100644 index 00000000..6ddac0da --- /dev/null +++ b/vendor/github.com/docker/docker/docker/listeners/listeners.go @@ -0,0 +1,19 @@ +package listeners + +import ( + "crypto/tls" + "net" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/sockets" +) + +func initTCPSocket(addr string, tlsConfig *tls.Config) (l net.Listener, err error) { + if tlsConfig == nil || tlsConfig.ClientAuth != tls.RequireAndVerifyClientCert { + logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + if l, err = sockets.NewTCPSocket(addr, tlsConfig); err != nil { + return nil, err + } + return +} diff --git a/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go b/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go new file mode 100644 index 00000000..548cb78e --- /dev/null +++ b/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go @@ -0,0 +1,89 @@ +// +build !windows + +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/activation" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. 
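The warning in initTCPSocket above is about mutual TLS: unless the listener's tls.Config both presents a server certificate and requires verified client certificates, anyone who can reach the TCP address can drive the daemon. A sketch of a config that satisfies the check; the certificate paths are hypothetical:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
)

func serverTLSConfig(caFile, certFile, keyFile string) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	caPEM, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no CA certificates parsed from %s", caFile)
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		ClientCAs:    pool,
		// This is the setting initTCPSocket looks for before warning.
		ClientAuth: tls.RequireAndVerifyClientCert,
	}, nil
}

func main() {
	cfg, err := serverTLSConfig("ca.pem", "server-cert.pem", "server-key.pem") // hypothetical paths
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}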
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "fd": + ls, err = listenFD(addr, tlsConfig) + if err != nil { + return nil, err + } + case "tcp": + l, err := initTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. +func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = activation.TLSListeners(false, tlsConfig) + } else { + listeners, err = activation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("No sockets found. Make sure the docker daemon was started by systemd.") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd address, should be number: %v", err) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("Too few socket activated files passed in") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file at fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + logrus.Errorf("Failed to close systemd activated file at fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/vendor/github.com/docker/docker/docker/runc.go b/vendor/github.com/docker/docker/docker/runc.go new file mode 100644 index 00000000..dccbccbb --- /dev/null +++ b/vendor/github.com/docker/docker/docker/runc.go @@ -0,0 +1,12 @@ +// +build !exclude_runc + +package docker + +import ( + "github.com/docker/docker/pkg/reexec" + "github.com/opencontainers/runc" +) + +func init() { + reexec.Register("docker-runc", runc.Main) +} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go new file mode 100644 index 00000000..d2a891c4 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -0,0 +1,74 @@ +package dockerversion + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" + "golang.org/x/net/context" +) + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. 
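listenFD above leans on the systemd socket-activation convention: inherited sockets always begin at file descriptor 3, which is where fdOffset := fdNum - 3 comes from. A stdlib-only sketch of that numbering, assuming systemd has set LISTEN_FDS:

package main

import (
	"fmt"
	"net"
	"os"
	"strconv"
)

// activatedListeners wraps fds 3..3+LISTEN_FDS-1 in net.Listeners, the same
// slice layout the activation package hands back to listenFD.
func activatedListeners() ([]net.Listener, error) {
	n, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
	if err != nil || n == 0 {
		return nil, fmt.Errorf("no socket-activated fds")
	}
	var ls []net.Listener
	for fd := 3; fd < 3+n; fd++ {
		f := os.NewFile(uintptr(fd), fmt.Sprintf("listen-fd-%d", fd))
		l, err := net.FileListener(f)
		if err != nil {
			return nil, err
		}
		ls = append(ls, l)
	}
	return ls, nil
}

func main() {
	ls, err := activatedListeners()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("got %d activated listeners\n", len(ls))
}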
+// In accordance with RFC 7231 (5.5.3), it is of the form: +// [docker client's UA] UpstreamClient([upstream client's UA]) +func DockerUserAgent(ctx context.Context) string { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) + + dockerUA := useragent.AppendVersions("", httpVersion...) + upstreamUA := getUserAgentFromContext(ctx) + if len(upstreamUA) > 0 { + ret := insertUpstreamUserAgent(upstreamUA, dockerUA) + return ret + } + return dockerUA +} + +// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists +func getUserAgentFromContext(ctx context.Context) string { + var upstreamUA string + if ctx != nil { + var ki interface{} = ctx.Value(httputils.UAStringKey) + if ki != nil { + upstreamUA = ctx.Value(httputils.UAStringKey).(string) + } + } + return upstreamUA +} + +// escapeStr returns s with every rune in charsToEscape escaped by a backslash +func escapeStr(s string, charsToEscape string) string { + var ret string + for _, currRune := range s { + appended := false + for _, escapeableRune := range charsToEscape { + if currRune == escapeableRune { + ret += `\` + string(currRune) + appended = true + break + } + } + if !appended { + ret += string(currRune) + } + } + return ret +} + +// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent +// string of the form: +// $dockerUA UpstreamClient($upstreamUA) +func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { + charsToEscape := `();\` + upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) + return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) +} diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go new file mode 100644 index 00000000..6644bce2 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go @@ -0,0 +1,13 @@ +// +build !autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time information. +const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" +) diff --git a/vendor/github.com/docker/docker/errors/errors.go b/vendor/github.com/docker/docker/errors/errors.go new file mode 100644 index 00000000..8070f48f --- /dev/null +++ b/vendor/github.com/docker/docker/errors/errors.go @@ -0,0 +1,41 @@ +package errors + +import "net/http" + +// apiError is an error wrapper that also +// holds information about response status codes. +type apiError struct { + error + statusCode int +} + +// HTTPErrorStatusCode returns a status code. 
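As a quick illustration of escapeStr and insertUpstreamUserAgent above, the same escaping can be reproduced with a strings.Replacer; both UA strings here are hypothetical:

package main

import (
	"fmt"
	"strings"
)

func main() {
	upstream := `compose/1.7.0 (linux; amd64)` // hypothetical upstream client UA
	// Backslash-escape the same characters escapeStr targets: ( ) ; \
	r := strings.NewReplacer(`\`, `\\`, `(`, `\(`, `)`, `\)`, `;`, `\;`)
	fmt.Printf("docker/1.11.1 UpstreamClient(%s)\n", r.Replace(upstream))
	// Output: docker/1.11.1 UpstreamClient(compose/1.7.0 \(linux\; amd64\))
}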
+func (e apiError) HTTPErrorStatusCode() int { + return e.statusCode +} + +// NewErrorWithStatusCode allows you to associate +// a specific HTTP Status Code to an error. +// The Server will take that code and set +// it as the response status. +func NewErrorWithStatusCode(err error, code int) error { + return apiError{err, code} +} + +// NewBadRequestError creates a new API error +// that has the 400 HTTP status code associated to it. +func NewBadRequestError(err error) error { + return NewErrorWithStatusCode(err, http.StatusBadRequest) +} + +// NewRequestNotFoundError creates a new API error +// that has the 404 HTTP status code associated to it. +func NewRequestNotFoundError(err error) error { + return NewErrorWithStatusCode(err, http.StatusNotFound) +} + +// NewRequestConflictError creates a new API error +// that has the 409 HTTP status code associated to it. +func NewRequestConflictError(err error) error { + return NewErrorWithStatusCode(err, http.StatusConflict) +} diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go index 955e1b85..72c9ab42 100644 --- a/vendor/github.com/docker/docker/image/fs.go +++ b/vendor/github.com/docker/docker/image/fs.go @@ -9,7 +9,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/ioutils" ) // IDWalkFunc is function called by StoreBackend.Walk @@ -119,7 +118,12 @@ func (s *fs) Set(data []byte) (ID, error) { } id := ID(digest.FromBytes(data)) - if err := ioutils.AtomicWriteFile(s.contentFile(id), data, 0600); err != nil { + filePath := s.contentFile(id) + tempFilePath := s.contentFile(id) + ".tmp" + if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { + return "", err + } + if err := os.Rename(tempFilePath, filePath); err != nil { return "", err } @@ -152,7 +156,12 @@ func (s *fs) SetMetadata(id ID, key string, data []byte) error { if err := os.MkdirAll(baseDir, 0700); err != nil { return err } - return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(id), key), data, 0600) + filePath := filepath.Join(s.metadataDir(id), key) + tempFilePath := filePath + ".tmp" + if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { + return err + } + return os.Rename(tempFilePath, filePath) } // GetMetadata returns metadata for a given ID. diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go index 7a05e649..52471408 100644 --- a/vendor/github.com/docker/docker/image/image.go +++ b/vendor/github.com/docker/docker/image/image.go @@ -48,11 +48,9 @@ type V1Image struct { // Image stores the image configuration type Image struct { V1Image - Parent ID `json:"parent,omitempty"` - RootFS *RootFS `json:"rootfs,omitempty"` - History []History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` // rawJSON caches the immutable JSON associated with this image. rawJSON []byte diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go index 76eaae0c..b546696d 100644 --- a/vendor/github.com/docker/docker/image/rootfs.go +++ b/vendor/github.com/docker/docker/image/rootfs.go @@ -2,14 +2,6 @@ package image import "github.com/docker/docker/layer" -// TypeLayers is used for RootFS.Type for filesystems organized into layers. 
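The image/fs.go hunks above replace ioutils.AtomicWriteFile with an explicit write-to-temp-file-then-rename. The pattern generalizes as in the sketch below; like the hunks, it relies on rename(2) atomically replacing the destination on the same filesystem, and it does not fsync, so it protects readers from torn writes but not the file from power loss:

package main

import (
	"io/ioutil"
	"log"
	"os"
)

// atomicWrite stages data in a sibling temp file, then renames it over the
// destination, so readers see either the old contents or the new, never a
// partial write.
func atomicWrite(path string, data []byte, perm os.FileMode) error {
	tmp := path + ".tmp"
	if err := ioutil.WriteFile(tmp, data, perm); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	if err := atomicWrite("demo.txt", []byte("hello\n"), 0600); err != nil {
		log.Fatal(err)
	}
}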
-const TypeLayers = "layers" - -// NewRootFS returns empty RootFS struct -func NewRootFS() *RootFS { - return &RootFS{Type: TypeLayers} -} - // Append appends a new diffID to rootfs func (r *RootFS) Append(id layer.DiffID) { r.DiffIDs = append(r.DiffIDs, id) diff --git a/vendor/github.com/docker/docker/image/rootfs_unix.go b/vendor/github.com/docker/docker/image/rootfs_unix.go index 83498f6c..99c1f8f4 100644 --- a/vendor/github.com/docker/docker/image/rootfs_unix.go +++ b/vendor/github.com/docker/docker/image/rootfs_unix.go @@ -16,3 +16,8 @@ type RootFS struct { func (r *RootFS) ChainID() layer.ChainID { return layer.CreateChainID(r.DiffIDs) } + +// NewRootFS returns empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: "layers"} +} diff --git a/vendor/github.com/docker/docker/image/rootfs_windows.go b/vendor/github.com/docker/docker/image/rootfs_windows.go deleted file mode 100644 index c5bd5828..00000000 --- a/vendor/github.com/docker/docker/image/rootfs_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build windows - -package image - -import ( - "crypto/sha512" - "fmt" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -// TypeLayersWithBase is used for RootFS.Type for Windows filesystems that have layers and a centrally-stored base layer. -const TypeLayersWithBase = "layers+base" - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. -type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` -} - -// BaseLayerID returns the 64 byte hex ID for the baselayer name. -func (r *RootFS) BaseLayerID() string { - if r.Type != TypeLayersWithBase { - panic("tried to get base layer ID without a base layer") - } - baseID := sha512.Sum384([]byte(r.BaseLayer)) - return fmt.Sprintf("%x", baseID[:32]) -} - -// ChainID returns the ChainID for the top layer in RootFS. -func (r *RootFS) ChainID() layer.ChainID { - ids := r.DiffIDs - if r.Type == TypeLayersWithBase { - // Add an extra ID for the base. - baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID()))) - ids = append([]layer.DiffID{baseDiffID}, ids...) 
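Both the Unix and the deleted Windows ChainID methods fold a list of DiffIDs into one chain ID via layer.CreateChainID. A sketch of the recurrence that function is understood to implement, assuming SHA-256 digests; the DiffIDs are hypothetical:

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID: a single layer's chain ID is its DiffID; each further layer
// hashes "parentChainID diffID" (space-separated).
func chainID(diffIDs []string) string {
	id := diffIDs[0]
	for _, d := range diffIDs[1:] {
		id = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(id+" "+d)))
	}
	return id
}

func main() {
	fmt.Println(chainID([]string{
		"sha256:1111111111111111111111111111111111111111111111111111111111111111", // hypothetical
		"sha256:2222222222222222222222222222222222222222222222222222222222222222", // hypothetical
	}))
}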
- } - return layer.CreateChainID(ids) -} - -// NewRootFSWithBaseLayer returns a RootFS struct with a base layer -func NewRootFSWithBaseLayer(baseLayer string) *RootFS { - return &RootFS{Type: TypeLayersWithBase, BaseLayer: baseLayer} -} diff --git a/vendor/github.com/docker/docker/image/tarexport/load.go b/vendor/github.com/docker/docker/image/tarexport/load.go new file mode 100644 index 00000000..42eaa40b --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/load.go @@ -0,0 +1,372 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/reference" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + progressOutput progress.Output + ) + if !quiet { + progressOutput = sf.NewProgressOutput(outStream, false) + outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()} + } + + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream, progressOutput) + } + return manifestFile.Close() + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + var parentLinks []parentLink + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), progressOutput) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + 
return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID, outStream) + } + + parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) + } + + for _, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + if err := l.setParentID(p.id, p.parentID); err != nil { + return err + } + } + } + + return nil +} + +func (l *tarexporter) setParentID(id, parentID image.ID) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + parent, err := l.is.Get(parentID) + if err != nil { + return err + } + if !checkValidParent(img, parent) { + return fmt.Errorf("image %v is not a valid parent for %v", parent.ID, img.ID) + } + return l.is.SetParent(id, parentID) +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, progressOutput progress.Output) (layer.Layer, error) { + rawTar, err := os.Open(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + inflatedLayerData, err := archive.DecompressStream(rawTar) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + if progressOutput != nil { + fileInfo, err := os.Stat(filename) + if err != nil { + logrus.Debugf("Error statting file: %v", err) + return nil, err + } + + progressReader := progress.NewProgressReader(inflatedLayerData, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") + + return l.ls.Register(progressReader, rootFS.ChainID()) + } + return l.ls.Register(inflatedLayerData, rootFS.ChainID()) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + return repositoriesFile.Close() + } + defer repositoriesFile.Close() + + repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.WithName(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID, outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + 
} + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct{ Parent string } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, progressOutput) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} + +type parentLink struct { + id, parentID image.ID +} + +func validatedParentLinks(pl []parentLink) (ret []parentLink) { +mainloop: + for i, p := range pl { + ret = append(ret, p) + for _, p2 := range pl { + if p2.id == p.parentID && p2.id != p.id { + continue mainloop + } + } + ret[i].parentID = "" + } + return +} + +func checkValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 && len(parent.History) == 0 { + return true // having history is not mandatory + } + if len(img.History)-len(parent.History) != 1 { + return false + } + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/image/tarexport/save.go b/vendor/github.com/docker/docker/image/tarexport/save.go new file mode 100644 index 00000000..9ec3cc9f --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/save.go @@ -0,0 +1,319 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err 
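safePath above is the guard that keeps untrusted archive paths inside the extraction directory; FollowSymlinkInScope resolves symlinks as well. A simplified, purely lexical version of the containment check:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// lexicallySafeJoin rejects joined paths that escape base. The vendored
// safePath is stricter: it also follows symlinks inside the tree.
func lexicallySafeJoin(base, untrusted string) (string, error) {
	p := filepath.Join(base, untrusted) // Join cleans ".." segments
	if p != base && !strings.HasPrefix(p, base+string(filepath.Separator)) {
		return "", fmt.Errorf("%q escapes %q", untrusted, base)
	}
	return p, nil
}

func main() {
	fmt.Println(lexicallySafeJoin("/tmp/docker-import", "layer1/layer.tar"))
	fmt.Println(lexicallySafeJoin("/tmp/docker-import", "../../etc/passwd"))
}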
+ } + + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { + imgDescr := make(map[image.ID]*imageDescriptor) + + addAssoc := func(id image.ID, ref reference.Named) { + if _, ok := imgDescr[id]; !ok { + imgDescr[id] = &imageDescriptor{} + } + + if ref != nil { + var tagged reference.NamedTagged + if _, ok := ref.(reference.Canonical); ok { + return + } + var ok bool + if tagged, ok = ref.(reference.NamedTagged); !ok { + var err error + if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { + return + } + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + } + + for _, name := range names { + id, ref, err := reference.ParseIDOrReference(name) + if err != nil { + return nil, err + } + if id != "" { + _, err := l.is.Get(image.ID(id)) + if err != nil { + return nil, err + } + addAssoc(image.ID(id), nil) + continue + } + if ref.Name() == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + continue + } + if reference.IsNameOnly(ref) { + assocs := l.rs.ReferencesByName(ref) + for _, assoc := range assocs { + addAssoc(assoc.ImageID, assoc.Ref) + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + } + continue + } + var imgID image.ID + if imgID, err = l.rs.Get(ref); err != nil { + return nil, err + } + addAssoc(imgID, ref) + + } + return imgDescr, nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + var parentLinks []parentLink + + for id, imageDescr := range s.images { + if err = s.saveImage(id); err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + if _, ok := reposLegacy[ref.Name()]; !ok { + reposLegacy[ref.Name()] = make(map[string]string) + } + reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, ref.String()) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: digest.Digest(id).Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + }) + + parentID, _ := s.is.GetParent(id) + parentLinks = append(parentLinks, parentLink{id, parentID}) + } + + for i, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + manifest[i].Parent = p.parentID + } + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err 
!= nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(manifest); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + if _, err := io.Copy(outStream, fs); err != nil { + return err + } + return nil +} + +func (s *saveSession) saveImage(id image.ID) error { + img, err := s.is.Get(id) + if err != nil { + return err + } + + if len(img.RootFS.DiffIDs) == 0 { + return fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{} + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil { + return err + } + layers = append(layers, v1Img.ID) + parent = v1ID + } + + configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return err + } + if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { + return err + } + + s.images[id].layers = layers + return nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return err + } + + // todo: why is this version file here? + if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return err + } + + // serialize filesystem + tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName)) + if err != nil { + return err + } + defer tarFile.Close() + + l, err := s.ls.Get(id) + if err != nil { + return err + } + defer layer.ReleaseAndLog(s.ls, l) + + arch, err := l.TarStream() + if err != nil { + return err + } + defer arch.Close() + + if _, err := io.Copy(tarFile, arch); err != nil { + return err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? 
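On the system.Chtimes calls in this file: save() pins manifest.json and the legacy repositories file to time.Unix(0, 0) so that saving the same image twice yields byte-identical archives, while saveLayer stamps the per-layer files with the image's creation time. A minimal demonstration of the epoch pinning, using a hypothetical scratch file:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"
)

func main() {
	f, err := ioutil.TempFile("", "save-demo") // hypothetical scratch file
	if err != nil {
		fmt.Println(err)
		return
	}
	f.Close()
	defer os.Remove(f.Name())

	epoch := time.Unix(0, 0)
	if err := os.Chtimes(f.Name(), epoch, epoch); err != nil {
		fmt.Println(err)
		return
	}
	fi, _ := os.Stat(f.Name())
	fmt.Println(fi.ModTime().UTC()) // 1970-01-01 00:00:00 +0000 UTC
}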
+ if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return err + } + } + + s.savedLayers[legacyImg.ID] = struct{}{} + return nil +} diff --git a/vendor/github.com/docker/docker/image/tarexport/tarexport.go b/vendor/github.com/docker/docker/image/tarexport/tarexport.go new file mode 100644 index 00000000..5e208777 --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/tarexport.go @@ -0,0 +1,37 @@ +package tarexport + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string + Parent image.ID `json:",omitempty"` +} + +type tarexporter struct { + is image.Store + ls layer.Store + rs reference.Store +} + +// NewTarExporter returns new ImageExporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store) image.Exporter { + return &tarexporter{ + is: is, + ls: ls, + rs: rs, + } +} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go index b7a9529e..e27ebd4c 100644 --- a/vendor/github.com/docker/docker/image/v1/imagev1.go +++ b/vendor/github.com/docker/docker/image/v1/imagev1.go @@ -3,7 +3,6 @@ package v1 import ( "encoding/json" "fmt" - "reflect" "regexp" "strings" @@ -11,7 +10,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/docker/image" "github.com/docker/docker/layer" - "github.com/docker/engine-api/types/versions" + "github.com/docker/docker/pkg/version" ) var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) @@ -19,7 +18,7 @@ var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) // noFallbackMinVersion is the minimum version for which v1compatibility // information will not be marshaled through the Image struct to remove // blank fields. -var noFallbackMinVersion = "1.8.3" +var noFallbackMinVersion = version.Version("1.8.3") // HistoryFromConfig creates a History struct from v1 configuration JSON func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { @@ -77,7 +76,7 @@ func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []im return nil, err } - useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) + useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion) if useFallback { var v1Image image.V1Image @@ -119,15 +118,8 @@ func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway } // Delete fields that didn't exist in old manifest - imageType := reflect.TypeOf(img).Elem() - for i := 0; i < imageType.NumField(); i++ { - f := imageType.Field(i) - jsonName := strings.Split(f.Tag.Get("json"), ",")[0] - // Parent is handled specially below. 
- if jsonName != "" && jsonName != "parent" { - delete(configAsMap, jsonName) - } - } + delete(configAsMap, "rootfs") + delete(configAsMap, "history") configAsMap["id"] = rawJSON(v1ID) if parentV1ID != "" { configAsMap["parent"] = rawJSON(parentV1ID) diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go index 5100fe2d..ad01e89a 100644 --- a/vendor/github.com/docker/docker/layer/layer.go +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -171,7 +171,7 @@ type Store interface { Get(ChainID) (Layer, error) Release(Layer) ([]Metadata, error) - CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) + CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) GetRWLayer(id string) (RWLayer, error) GetMountID(id string) (string, error) ReinitRWLayer(l RWLayer) error diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go index f18aff21..73a1b34c 100644 --- a/vendor/github.com/docker/docker/layer/layer_store.go +++ b/vendor/github.com/docker/docker/layer/layer_store.go @@ -263,7 +263,7 @@ func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { references: map[Layer]struct{}{}, } - if err = ls.driver.Create(layer.cacheID, pid, "", nil); err != nil { + if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil { return nil, err } @@ -417,7 +417,7 @@ func (ls *layerStore) Release(l Layer) ([]Metadata, error) { return ls.releaseLayer(layer) } -func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) { +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) { ls.mountL.Lock() defer ls.mountL.Unlock() m, ok := ls.mounts[name] @@ -454,14 +454,14 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel stri } if initFunc != nil { - pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc) if err != nil { return nil, err } m.initID = pid } - if err = ls.driver.CreateReadWrite(m.mountID, pid, "", storageOpt); err != nil { + if err = ls.driver.Create(m.mountID, pid, ""); err != nil { return nil, err } @@ -583,14 +583,14 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error { return nil } -func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) { // Use "-init" to maintain compatibility with graph drivers // which are expecting this layer with this special name. If all // graph drivers can be updated to not rely on knowing about this layer // then the initID should be randomly generated. 
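The MakeV1ConfigFromConfig hunk above swaps reflection over the Image struct's JSON tags for two explicit deletes on the decoded map, which is equivalent for the remaining v2-only fields. The map round-trip it relies on looks like this, with a hypothetical config document:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"id":"abc","os":"linux","rootfs":{"type":"layers"},"history":[{}]}`) // hypothetical config
	var m map[string]json.RawMessage
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	// Drop the fields the v1 manifest format never had.
	delete(m, "rootfs")
	delete(m, "history")
	out, _ := json.Marshal(m)
	fmt.Println(string(out)) // {"id":"abc","os":"linux"}
}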
initID := fmt.Sprintf("%s-init", graphID) - if err := ls.driver.Create(initID, parent, mountLabel, storageOpt); err != nil { + if err := ls.driver.Create(initID, parent, mountLabel); err != nil { return "", err } p, err := ls.driver.Get(initID, "") diff --git a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go deleted file mode 100644 index e20311a0..00000000 --- a/vendor/github.com/docker/docker/layer/layer_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" -) - -// GetLayerPath returns the path to a layer -func GetLayerPath(s Store, layer ChainID) (string, error) { - ls, ok := s.(*layerStore) - if !ok { - return "", errors.New("unsupported layer store") - } - ls.layerL.Lock() - defer ls.layerL.Unlock() - - rl, ok := ls.layerMap[layer] - if !ok { - return "", ErrLayerDoesNotExist - } - - path, err := ls.driver.Get(rl.cacheID, "") - if err != nil { - return "", err - } - - if err := ls.driver.Put(rl.cacheID); err != nil { - return "", err - } - - return path, nil -} - -func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { - var err error // this is used for cleanup in existingLayer case - diffID := digest.FromBytes([]byte(graphID)) - - // Create new roLayer - layer := &roLayer{ - cacheID: graphID, - diffID: DiffID(diffID), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - size: size, - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - defer func() { - if err != nil { - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - layer.chainID = createChainIDFromParent("", layer.diffID) - - if !ls.driver.Exists(layer.cacheID) { - return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) - } - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) mountID(name string) string { - // windows has issues if container ID doesn't match mount ID - return name -} - -func (ls *layerStore) GraphDriver() graphdriver.Driver { - return ls.driver -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client.go b/vendor/github.com/docker/docker/libcontainerd/client.go new file mode 100644 index 00000000..7e8e47bc --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client.go @@ -0,0 +1,46 @@ +package libcontainerd + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" +) + +// clientCommon contains the platform agnostic fields used in the client structure +type clientCommon struct { + backend Backend + containers map[string]*container + locker *locker.Locker + mapMutex sync.RWMutex // protects read/write operations on the containers map +} + +func (clnt *client) lock(containerID string) { + clnt.locker.Lock(containerID) +} + +func (clnt *client) unlock(containerID string) { + clnt.locker.Unlock(containerID) +} + +// must hold a lock 
for cont.containerID +func (clnt *client) appendContainer(cont *container) { + clnt.mapMutex.Lock() + clnt.containers[cont.containerID] = cont + clnt.mapMutex.Unlock() +} +func (clnt *client) deleteContainer(friendlyName string) { + clnt.mapMutex.Lock() + delete(clnt.containers, friendlyName) + clnt.mapMutex.Unlock() +} + +func (clnt *client) getContainer(containerID string) (*container, error) { + clnt.mapMutex.RLock() + container, ok := clnt.containers[containerID] + defer clnt.mapMutex.RUnlock() + if !ok { + return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error + } + return container, nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_linux.go new file mode 100644 index 00000000..8eab7512 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_linux.go @@ -0,0 +1,401 @@ +package libcontainerd + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/opencontainers/specs/specs-go" + "golang.org/x/net/context" +) + +type client struct { + clientCommon + + // Platform specific properties below here. + remote *remote + q queue + exitNotifiers map[string]*exitNotifier +} + +func (clnt *client) AddProcess(containerID, processFriendlyName string, specp Process) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + spec, err := container.spec() + if err != nil { + return err + } + sp := spec.Process + sp.Args = specp.Args + sp.Terminal = specp.Terminal + if specp.Env != nil { + sp.Env = specp.Env + } + if specp.Cwd != nil { + sp.Cwd = *specp.Cwd + } + if specp.User != nil { + sp.User = specs.User{ + UID: specp.User.UID, + GID: specp.User.GID, + AdditionalGids: specp.User.AdditionalGids, + } + } + if specp.Capabilities != nil { + sp.Capabilities = specp.Capabilities + } + + p := container.newProcess(processFriendlyName) + + r := &containerd.AddProcessRequest{ + Args: sp.Args, + Cwd: sp.Cwd, + Terminal: sp.Terminal, + Id: containerID, + Env: sp.Env, + User: &containerd.User{ + Uid: sp.User.UID, + Gid: sp.User.GID, + AdditionalGids: sp.User.AdditionalGids, + }, + Pid: processFriendlyName, + Stdin: p.fifo(syscall.Stdin), + Stdout: p.fifo(syscall.Stdout), + Stderr: p.fifo(syscall.Stderr), + Capabilities: sp.Capabilities, + ApparmorProfile: sp.ApparmorProfile, + SelinuxLabel: sp.SelinuxLabel, + NoNewPrivileges: sp.NoNewPrivileges, + Rlimits: convertRlimits(sp.Rlimits), + } + + iopipe, err := p.openFifos(sp.Terminal) + if err != nil { + return err + } + + if _, err := clnt.remote.apiClient.AddProcess(context.Background(), r); err != nil { + p.closeFifos(iopipe) + return err + } + + container.processes[processFriendlyName] = p + + clnt.unlock(containerID) + + if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil { + return err + } + clnt.lock(containerID) + + return nil +} + +func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { + root, err := filepath.Abs(clnt.remote.stateDir) + if err != nil { + return "", err + } + if uid == 0 && gid == 0 { + return root, nil + } + p := string(filepath.Separator) + for _, d := range strings.Split(root, string(filepath.Separator))[1:] { + p = filepath.Join(p, d) + fi, err := 
os.Stat(p) + if err != nil && !os.IsNotExist(err) { + return "", err + } + if os.IsNotExist(err) || fi.Mode()&1 == 0 { + p = fmt.Sprintf("%s.%d.%d", p, uid, gid) + if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + } + } + return p, nil +} + +func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + if ctr, err := clnt.getContainer(containerID); err == nil { + if ctr.restarting { + ctr.restartManager.Cancel() + ctr.clean() + } else { + return fmt.Errorf("Container %s is already active", containerID) + } + } + + uid, gid, err := getRootIDs(specs.Spec(spec)) + if err != nil { + return err + } + dir, err := clnt.prepareBundleDir(uid, gid) + if err != nil { + return err + } + + container := clnt.newContainer(filepath.Join(dir, containerID), options...) + if err := container.clean(); err != nil { + return err + } + + defer func() { + if err != nil { + container.clean() + clnt.deleteContainer(containerID) + } + }() + + if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + + f, err := os.Create(filepath.Join(container.dir, configFilename)) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(spec); err != nil { + return err + } + + return container.start() +} + +func (clnt *client) Signal(containerID string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: InitFriendlyName, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: containerID, + Pid: processFriendlyName, + Width: uint32(width), + Height: uint32(height), + }) + return err +} + +func (clnt *client) Pause(containerID string) error { + return clnt.setState(containerID, StatePause) +} + +func (clnt *client) setState(containerID, state string) error { + clnt.lock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + clnt.unlock(containerID) + return err + } + if container.systemPid == 0 { + clnt.unlock(containerID) + return fmt.Errorf("No active process for container %s", containerID) + } + st := "running" + if state == StatePause { + st = "paused" + } + chstate := make(chan struct{}) + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Status: st, + }) + if err != nil { + clnt.unlock(containerID) + return err + } + container.pauseMonitor.append(state, chstate) + clnt.unlock(containerID) + <-chstate + return nil +} + +func (clnt *client) Resume(containerID string) error { + return clnt.setState(containerID, StateResume) +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) + if err != nil { + return nil, err + } + return (*Stats)(resp), nil +} + +// Take care of the old 1.11.0 behavior in case the version upgrade +// happened without a clean daemon shutdown +func (clnt *client) 
cleanupOldRootfs(containerID string) { + // Unmount and delete the bundle folder + if mts, err := mount.GetMounts(); err == nil { + for _, mts := range mts { + if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { + if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { + os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) + } + break + } + } + } +} + +func (clnt *client) setExited(containerID string) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + var exitCode uint32 + if event, ok := clnt.remote.pastEvents[containerID]; ok { + exitCode = event.Status + delete(clnt.remote.pastEvents, containerID) + } + + err := clnt.backend.StateChanged(containerID, StateInfo{ + State: StateExit, + ExitCode: exitCode, + }) + + clnt.cleanupOldRootfs(containerID) + + return err +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + cont, err := clnt.getContainerdContainer(containerID) + if err != nil { + return nil, err + } + pids := make([]int, len(cont.Pids)) + for i, p := range cont.Pids { + pids[i] = int(p) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is a no-op on Linux. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { + resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) + if err != nil { + return nil, err + } + for _, cont := range resp.Containers { + if cont.Id == containerID { + return cont, nil + } + } + return nil, fmt.Errorf("invalid state response") +} + +func (clnt *client) newContainer(dir string, options ...CreateOption) *container { + container := &container{ + containerCommon: containerCommon{ + process: process{ + dir: dir, + processCommon: processCommon{ + containerID: filepath.Base(dir), + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + } + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Error(err) + } + } + return container +} + +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + if container.systemPid == 0 { + return fmt.Errorf("No active process for container %s", containerID) + } + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Resources: (*containerd.UpdateResource)(&resources), + }) + if err != nil { + return err + } + return nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + w, ok := clnt.exitNotifiers[containerID] + defer clnt.mapMutex.Unlock() + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +type exitNotifier struct { + id string + client *client + c chan struct{} + once sync.Once +} + +func (en *exitNotifier) close() { + en.once.Do(func() { + close(en.c) + en.client.mapMutex.Lock() + if en == en.client.exitNotifiers[en.id] { + 
delete(en.client.exitNotifiers, en.id) + } + en.client.mapMutex.Unlock() + }) +} +func (en *exitNotifier) wait() <-chan struct{} { + return en.c +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go new file mode 100644 index 00000000..1a1f7fe7 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go @@ -0,0 +1,83 @@ +// +build experimental + +package libcontainerd + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" +) + +func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("restore container %s state %s", cont.Id, cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("container %s is already active", containerID) + } + + defer func() { + if err != nil { + clnt.deleteContainer(cont.Id) + } + }() + + container := clnt.newContainer(cont.BundlePath, options...) + container.systemPid = systemPid(cont) + + var terminal bool + for _, p := range cont.Processes { + if p.Pid == InitFriendlyName { + terminal = p.Terminal + } + } + + iopipe, err := container.openFifos(terminal) + if err != nil { + return err + } + + if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil { + return err + } + + clnt.appendContainer(container) + + err = clnt.backend.StateChanged(containerID, StateInfo{ + State: StateRestore, + Pid: container.systemPid, + }) + + if err != nil { + return err + } + + if event, ok := clnt.remote.pastEvents[containerID]; ok { + // This should only be a pause or resume event + if event.Type == StatePause || event.Type == StateResume { + return clnt.backend.StateChanged(containerID, StateInfo{ + State: event.Type, + Pid: container.systemPid, + }) + } + + logrus.Warnf("unexpected backlog event: %#v", event) + } + + return nil +} + +func (clnt *client) Restore(containerID string, options ...CreateOption) error { + cont, err := clnt.getContainerdContainer(containerID) + if err == nil && cont.Status != "stopped" { + if err := clnt.restore(cont, options...); err != nil { + logrus.Errorf("error restoring %s: %v", containerID, err) + } + return nil + } + return clnt.setExited(containerID) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go new file mode 100644 index 00000000..52ea2a61 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go @@ -0,0 +1,41 @@ +// +build !experimental + +package libcontainerd + +import ( + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +func (clnt *client) Restore(containerID string, options ...CreateOption) error { + w := clnt.getOrCreateExitNotifier(containerID) + defer w.close() + cont, err := clnt.getContainerdContainer(containerID) + if err == nil && cont.Status != "stopped" { + clnt.lock(cont.Id) + container := clnt.newContainer(cont.BundlePath) + container.systemPid = systemPid(cont) + clnt.appendContainer(container) + clnt.unlock(cont.Id) + + if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { + logrus.Errorf("error sending sigterm to %v: %v", containerID, err) + } + select { + case <-time.After(10 * time.Second): + if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != 
nil { + logrus.Errorf("error sending sigkill to %v: %v", containerID, err) + } + select { + case <-time.After(2 * time.Second): + case <-w.wait(): + return nil + } + case <-w.wait(): + return nil + } + } + return clnt.setExited(containerID) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container.go b/vendor/github.com/docker/docker/libcontainerd/container.go new file mode 100644 index 00000000..30bc9502 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container.go @@ -0,0 +1,40 @@ +package libcontainerd + +import ( + "fmt" + "time" + + "github.com/docker/docker/restartmanager" +) + +const ( + // InitFriendlyName is the name given in the lookup map of processes + // for the first process started in a container. + InitFriendlyName = "init" + configFilename = "config.json" +) + +type containerCommon struct { + process + restartManager restartmanager.RestartManager + restarting bool + processes map[string]*process + startedAt time.Time +} + +// WithRestartManager sets the restartmanager to be used with the container. +func WithRestartManager(rm restartmanager.RestartManager) CreateOption { + return restartManager{rm} +} + +type restartManager struct { + rm restartmanager.RestartManager +} + +func (rm restartManager) Apply(p interface{}) error { + if pr, ok := p.(*container); ok { + pr.restartManager = rm.rm + return nil + } + return fmt.Errorf("WithRestartManager option not supported for this client") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_linux.go b/vendor/github.com/docker/docker/libcontainerd/container_linux.go new file mode 100644 index 00000000..7fa6770b --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container_linux.go @@ -0,0 +1,193 @@ +package libcontainerd + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/restartmanager" + "github.com/opencontainers/specs/specs-go" + "golang.org/x/net/context" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. + pauseMonitor + oom bool +} + +func (ctr *container) clean() error { + if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { + return nil + } + if _, err := os.Lstat(ctr.dir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + if err := os.RemoveAll(ctr.dir); err != nil { + return err + } + return nil +} + +// cleanProcess removes the fifos used by an additional process. +// Caller needs to lock container ID before calling this method. 
+func (ctr *container) cleanProcess(id string) { + if p, ok := ctr.processes[id]; ok { + for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { + if err := os.Remove(p.fifo(i)); err != nil { + logrus.Warnf("failed to remove %v for process %v: %v", p.fifo(i), id, err) + } + } + } + delete(ctr.processes, id) +} + +func (ctr *container) spec() (*specs.Spec, error) { + var spec specs.Spec + dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (ctr *container) start() error { + spec, err := ctr.spec() + if err != nil { + return err + } + iopipe, err := ctr.openFifos(spec.Process.Terminal) + if err != nil { + return err + } + + r := &containerd.CreateContainerRequest{ + Id: ctr.containerID, + BundlePath: ctr.dir, + Stdin: ctr.fifo(syscall.Stdin), + Stdout: ctr.fifo(syscall.Stdout), + Stderr: ctr.fifo(syscall.Stderr), + // check to see if we are running in ramdisk to disable pivot root + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + } + ctr.client.appendContainer(ctr) + + resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) + if err != nil { + ctr.closeFifos(iopipe) + return err + } + ctr.startedAt = time.Now() + + if err := ctr.client.backend.AttachStreams(ctr.containerID, *iopipe); err != nil { + return err + } + ctr.systemPid = systemPid(resp.Container) + + return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ + State: StateStart, + Pid: ctr.systemPid, + }) +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + dir: ctr.dir, + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +func (ctr *container) handleEvent(e *containerd.Event) error { + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + switch e.Type { + case StateExit, StatePause, StateResume, StateOOM: + st := StateInfo{ + State: e.Type, + ExitCode: e.Status, + OOMKilled: e.Type == StateExit && ctr.oom, + } + if e.Type == StateOOM { + ctr.oom = true + } + if e.Type == StateExit && e.Pid != InitFriendlyName { + st.ProcessID = e.Pid + st.State = StateExitProcess + } + if st.State == StateExit && ctr.restartManager != nil { + restart, wait, err := ctr.restartManager.ShouldRestart(e.Status, false, time.Since(ctr.startedAt)) + if err != nil { + logrus.Warnf("container %s %v", ctr.containerID, err) + } else if restart { + st.State = StateRestart + ctr.restarting = true + ctr.client.deleteContainer(e.Id) + go func() { + err := <-wait + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + ctr.restarting = false + if err != nil { + st.State = StateExit + ctr.clean() + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Error(err) + } + }) + if err != restartmanager.ErrRestartCanceled { + logrus.Error(err) + } + } else { + ctr.start() + } + }() + } + } + + // Remove process from list if we have exited + // We need to do so here in case the Message Handler decides to restart it. 
+ switch st.State { + case StateExit: + ctr.clean() + ctr.client.deleteContainer(e.Id) + case StateExitProcess: + ctr.cleanProcess(st.ProcessID) + } + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Error(err) + } + if e.Type == StatePause || e.Type == StateResume { + ctr.pauseMonitor.handle(e.Type) + } + if e.Type == StateExit { + if en := ctr.client.getExitNotifier(e.Id); en != nil { + en.close() + } + } + }) + + default: + logrus.Debugf("event unhandled: %+v", e) + } + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go new file mode 100644 index 00000000..379cbf1f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go @@ -0,0 +1,31 @@ +package libcontainerd + +// pauseMonitor is a helper to get notifications from pause state changes. +type pauseMonitor struct { + waiters map[string][]chan struct{} +} + +func (m *pauseMonitor) handle(t string) { + if m.waiters == nil { + return + } + q, ok := m.waiters[t] + if !ok { + return + } + if len(q) > 0 { + close(q[0]) + m.waiters[t] = q[1:] + } +} + +func (m *pauseMonitor) append(t string, waiter chan struct{}) { + if m.waiters == nil { + m.waiters = make(map[string][]chan struct{}) + } + _, ok := m.waiters[t] + if !ok { + m.waiters[t] = make([]chan struct{}, 0) + } + m.waiters[t] = append(m.waiters[t], waiter) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process.go b/vendor/github.com/docker/docker/libcontainerd/process.go new file mode 100644 index 00000000..57562c87 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process.go @@ -0,0 +1,18 @@ +package libcontainerd + +// processCommon contains the platform common fields as part of the process structure +// which keeps the state for the main container process, as well as any exec +// processes. +type processCommon struct { + client *client + + // containerID is the Container ID + containerID string + + // friendlyName is an identifier for the process (or `InitFriendlyName` + // for the first process) + friendlyName string + + // systemPid is the PID of the main container process + systemPid uint32 +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_linux.go b/vendor/github.com/docker/docker/libcontainerd/process_linux.go new file mode 100644 index 00000000..3c48576f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process_linux.go @@ -0,0 +1,110 @@ +package libcontainerd + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +var fdNames = map[int]string{ + syscall.Stdin: "stdin", + syscall.Stdout: "stdout", + syscall.Stderr: "stderr", +} + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. 
+ dir string +} + +func (p *process) openFifos(terminal bool) (*IOPipe, error) { + bundleDir := p.dir + if err := os.MkdirAll(bundleDir, 0700); err != nil { + return nil, err + } + + for i := 0; i < 3; i++ { + f := p.fifo(i) + if err := syscall.Mkfifo(f, 0700); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("mkfifo: %s %v", f, err) + } + } + + io := &IOPipe{} + stdinf, err := os.OpenFile(p.fifo(syscall.Stdin), syscall.O_RDWR, 0) + if err != nil { + return nil, err + } + + io.Stdout = openReaderFromFifo(p.fifo(syscall.Stdout)) + if !terminal { + io.Stderr = openReaderFromFifo(p.fifo(syscall.Stderr)) + } else { + io.Stderr = emptyReader{} + } + + io.Stdin = ioutils.NewWriteCloserWrapper(stdinf, func() error { + stdinf.Close() + _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: p.containerID, + Pid: p.friendlyName, + CloseStdin: true, + }) + return err + }) + + return io, nil +} + +func (p *process) closeFifos(io *IOPipe) { + io.Stdin.Close() + closeReaderFifo(p.fifo(syscall.Stdout)) + closeReaderFifo(p.fifo(syscall.Stderr)) +} + +type emptyReader struct{} + +func (r emptyReader) Read(b []byte) (int, error) { + return 0, io.EOF +} + +func openReaderFromFifo(fn string) io.Reader { + r, w := io.Pipe() + c := make(chan struct{}) + go func() { + close(c) + stdoutf, err := os.OpenFile(fn, syscall.O_RDONLY, 0) + if err != nil { + r.CloseWithError(err) + } + if _, err := io.Copy(w, stdoutf); err != nil { + r.CloseWithError(err) + } + w.Close() + stdoutf.Close() + }() + <-c // wait for the goroutine to get scheduled and syscall to block + return r +} + +// closeReaderFifo closes fifo that may be blocked on open by opening the write side. +func closeReaderFifo(fn string) { + f, err := os.OpenFile(fn, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return + } + f.Close() +} + +func (p *process) fifo(index int) string { + return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/queue_linux.go b/vendor/github.com/docker/docker/libcontainerd/queue_linux.go new file mode 100644 index 00000000..34bc81d2 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/queue_linux.go @@ -0,0 +1,29 @@ +package libcontainerd + +import "sync" + +type queue struct { + sync.Mutex + fns map[string]chan struct{} +} + +func (q *queue) append(id string, f func()) { + q.Lock() + defer q.Unlock() + + if q.fns == nil { + q.fns = make(map[string]chan struct{}) + } + + done := make(chan struct{}) + + fn, ok := q.fns[id] + q.fns[id] = done + go func() { + if ok { + <-fn + } + f() + close(done) + }() +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote.go b/vendor/github.com/docker/docker/libcontainerd/remote.go new file mode 100644 index 00000000..a679edcf --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote.go @@ -0,0 +1,18 @@ +package libcontainerd + +// Remote on Linux defines the access point to the containerd grpc API. +// Remote on Windows is largely an unimplemented interface as there is +// no remote containerd. +type Remote interface { + // Client returns a new Client instance connected with given Backend. + Client(Backend) (Client, error) + // Cleanup stops containerd if it was started by libcontainerd. + // Note this is not used on Windows as there is no remote containerd. + Cleanup() +} + +// RemoteOption allows configuring parameters of remotes. +// This is unused on Windows. 
+type RemoteOption interface { + Apply(Remote) error +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_linux.go b/vendor/github.com/docker/docker/libcontainerd/remote_linux.go new file mode 100644 index 00000000..fa339ce4 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_linux.go @@ -0,0 +1,290 @@ +package libcontainerd + +import ( + "fmt" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/locker" + sysinfo "github.com/docker/docker/pkg/system" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/transport" +) + +const ( + maxConnectionRetryCount = 3 + connectionRetryDelay = 3 * time.Second + containerdShutdownTimeout = 15 * time.Second + containerdBinary = "docker-containerd" + containerdPidFilename = "docker-containerd.pid" + containerdSockFilename = "docker-containerd.sock" + eventTimestampFilename = "event.ts" +) + +type remote struct { + sync.RWMutex + apiClient containerd.APIClient + daemonPid int + stateDir string + rpcAddr string + startDaemon bool + closeManually bool + debugLog bool + rpcConn *grpc.ClientConn + clients []*client + eventTsPath string + pastEvents map[string]*containerd.Event + runtimeArgs []string +} + +// New creates a fresh instance of libcontainerd remote. +func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err) + } + }() + r := &remote{ + stateDir: stateDir, + daemonPid: -1, + eventTsPath: filepath.Join(stateDir, eventTimestampFilename), + pastEvents: make(map[string]*containerd.Event), + } + for _, option := range options { + if err := option.Apply(r); err != nil { + return nil, err + } + } + + if err := sysinfo.MkdirAll(stateDir, 0700); err != nil { + return nil, err + } + + if r.rpcAddr == "" { + r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) + } + + if r.startDaemon { + if err := r.runContainerdDaemon(); err != nil { + return nil, err + } + } + + if err := r.startEventsMonitor(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *remote) Cleanup() { +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + remote: r, + exitNotifiers: make(map[string]*exitNotifier), + } + + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + return c, nil +} + +func (r *remote) updateEventTimestamp(t time.Time) { + f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) + if err != nil { + logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) + return + } + defer f.Close() + + b, err := t.MarshalText() + if err != nil { + logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) + return + } + + n, err := f.Write(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) + f.Truncate(0) + return + } + +} + +func (r *remote) getLastEventTimestamp() int64 { + t := time.Now() + + fi, err := os.Stat(r.eventTsPath) + if os.IsNotExist(err) || fi.Size() == 0 { + return t.Unix() + } + + f, err := os.Open(r.eventTsPath) + if err != nil { + 
logrus.Warn("libcontainerd: Unable to access last event ts: %v", err) + return t.Unix() + } + + b := make([]byte, fi.Size()) + n, err := f.Read(b) + if err != nil || n != len(b) { + logrus.Warn("libcontainerd: Unable to read last event ts: %v", err) + return t.Unix() + } + + t.UnmarshalText(b) + + return t.Unix() +} + +func (r *remote) startEventsMonitor() error { + // First, get past events + er := &containerd.EventsRequest{ + Timestamp: uint64(r.getLastEventTimestamp()), + } + events, err := r.apiClient.Events(context.Background(), er) + if err != nil { + return err + } + go r.handleEventStream(events) + return nil +} + +func (r *remote) handleEventStream(events containerd.API_EventsClient) { + live := false + for { + e, err := events.Recv() + if err != nil { + if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && + r.closeManually { + // ignore error if grpc remote connection is closed manually + return + } + logrus.Errorf("failed to receive event from containerd: %v", err) + go r.startEventsMonitor() + return + } + + if live == false { + logrus.Debugf("received past containerd event: %#v", e) + + // Pause/Resume events should never happens after exit one + switch e.Type { + case StateExit: + r.pastEvents[e.Id] = e + case StatePause: + r.pastEvents[e.Id] = e + case StateResume: + r.pastEvents[e.Id] = e + case stateLive: + live = true + r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0)) + } + } else { + logrus.Debugf("received containerd event: %#v", e) + + var container *container + var c *client + r.RLock() + for _, c = range r.clients { + container, err = c.getContainer(e.Id) + if err == nil { + break + } + } + r.RUnlock() + if container == nil { + logrus.Errorf("no state for container: %q", err) + continue + } + + if err := container.handleEvent(e); err != nil { + logrus.Errorf("error processing state change for %s: %v", e.Id, err) + } + + r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0)) + } + } +} + +func (r *remote) runContainerdDaemon() error { + var err error + r.apiClient, err = newBridge(stateDir, 10, "docker-runc", r.runtimeArgs) + return err +} + +// WithRemoteAddr sets the external containerd socket to connect to. +func WithRemoteAddr(addr string) RemoteOption { + return rpcAddr(addr) +} + +type rpcAddr string + +func (a rpcAddr) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.rpcAddr = string(a) + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithRuntimeArgs sets the list of runtime args passed to containerd +func WithRuntimeArgs(args []string) RemoteOption { + return runtimeArgs(args) +} + +type runtimeArgs []string + +func (rt runtimeArgs) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtimeArgs = rt + return nil + } + return fmt.Errorf("WithRuntimeArgs option not supported for this remote") +} + +// WithStartDaemon defines if libcontainerd should also run containerd daemon. +func WithStartDaemon(start bool) RemoteOption { + return startDaemon(start) +} + +type startDaemon bool + +func (s startDaemon) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.startDaemon = bool(s) + return nil + } + return fmt.Errorf("WithStartDaemon option not supported for this remote") +} + +// WithDebugLog defines if containerd debug logs will be enabled for daemon. 
+func WithDebugLog(debug bool) RemoteOption { + return debugLog(debug) +} + +type debugLog bool + +func (d debugLog) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.debugLog = bool(d) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go new file mode 100644 index 00000000..d745298a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go @@ -0,0 +1,48 @@ +package libcontainerd + +import ( + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/api/grpc/server" + "github.com/docker/containerd/api/grpc/types" + "github.com/docker/containerd/subreaper" + "github.com/docker/containerd/supervisor" +) + +var ( + stateDir = "/run/containerd" +) + +type bridge struct { + s types.APIServer +} + +func newBridge(stateDir string, concurrency int, runtimeName string, runtimeArgs []string) (types.APIClient, error) { + s, err := daemon(stateDir, concurrency, runtimeName, runtimeArgs) + if err != nil { + return nil, err + } + return &bridge{s: s}, nil +} + +func daemon(stateDir string, concurrency int, runtimeName string, runtimeArgs []string) (types.APIServer, error) { + if err := subreaper.Start(); err != nil { + logrus.WithField("error", err).Error("containerd: start subreaper") + } + sv, err := supervisor.New(stateDir, runtimeName, "", runtimeArgs, 15*time.Second, 500) + if err != nil { + return nil, err + } + wg := &sync.WaitGroup{} + for i := 0; i < concurrency; i++ { + wg.Add(1) + w := supervisor.NewWorker(sv, wg) + go w.Start() + } + if err := sv.Start(); err != nil { + return nil, err + } + return server.NewServer(sv), nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go new file mode 100644 index 00000000..0000bbd6 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go @@ -0,0 +1,131 @@ +package libcontainerd + +import ( + "io" + + "github.com/docker/containerd/api/grpc/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func (b *bridge) CreateContainer(ctx context.Context, in *types.CreateContainerRequest, opts ...grpc.CallOption) (*types.CreateContainerResponse, error) { + return b.s.CreateContainer(ctx, in) +} + +func (b *bridge) UpdateContainer(ctx context.Context, in *types.UpdateContainerRequest, opts ...grpc.CallOption) (*types.UpdateContainerResponse, error) { + return b.s.UpdateContainer(ctx, in) +} + +func (b *bridge) Signal(ctx context.Context, in *types.SignalRequest, opts ...grpc.CallOption) (*types.SignalResponse, error) { + return b.s.Signal(ctx, in) +} + +func (b *bridge) UpdateProcess(ctx context.Context, in *types.UpdateProcessRequest, opts ...grpc.CallOption) (*types.UpdateProcessResponse, error) { + return b.s.UpdateProcess(ctx, in) +} + +func (b *bridge) AddProcess(ctx context.Context, in *types.AddProcessRequest, opts ...grpc.CallOption) (*types.AddProcessResponse, error) { + return b.s.AddProcess(ctx, in) +} + +func (b *bridge) CreateCheckpoint(ctx context.Context, in *types.CreateCheckpointRequest, opts ...grpc.CallOption) (*types.CreateCheckpointResponse, error) { + return b.s.CreateCheckpoint(ctx, in) +} + +func (b *bridge) DeleteCheckpoint(ctx context.Context, in *types.DeleteCheckpointRequest, opts ...grpc.CallOption) 
(*types.DeleteCheckpointResponse, error) { + return b.s.DeleteCheckpoint(ctx, in) +} + +func (b *bridge) ListCheckpoint(ctx context.Context, in *types.ListCheckpointRequest, opts ...grpc.CallOption) (*types.ListCheckpointResponse, error) { + return b.s.ListCheckpoint(ctx, in) +} + +func (b *bridge) State(ctx context.Context, in *types.StateRequest, opts ...grpc.CallOption) (*types.StateResponse, error) { + return b.s.State(ctx, in) +} + +func (b *bridge) GetServerVersion(ctx context.Context, in *types.GetServerVersionRequest, opts ...grpc.CallOption) (*types.GetServerVersionResponse, error) { + return b.s.GetServerVersion(ctx, in) +} + +func (b *bridge) Events(ctx context.Context, in *types.EventsRequest, opts ...grpc.CallOption) (types.API_EventsClient, error) { + c := make(chan *types.Event, 1024) + client := &aPI_EventsClient{ + c: c, + } + client.ctx = ctx + server := &aPI_EventsServer{ + c: c, + } + server.ctx = ctx + go b.s.Events(in, server) + return client, nil +} + +func (b *bridge) Stats(ctx context.Context, in *types.StatsRequest, opts ...grpc.CallOption) (*types.StatsResponse, error) { + return b.s.Stats(ctx, in) +} + +type aPI_EventsServer struct { + serverStream + c chan *types.Event +} + +func (a *aPI_EventsServer) Send(msg *types.Event) error { + a.c <- msg + return nil +} + +type serverStream struct { + stream +} + +func (s *serverStream) SendHeader(metadata.MD) error { + return nil +} + +func (s *serverStream) SetTrailer(metadata.MD) { +} + +type aPI_EventsClient struct { + clientStream + c chan *types.Event +} + +func (a *aPI_EventsClient) Recv() (*types.Event, error) { + e, ok := <-a.c + if !ok { + return nil, io.EOF + } + return e, nil +} + +type stream struct { + ctx context.Context +} + +func (s *stream) Context() context.Context { + return s.ctx +} +func (s *stream) SendMsg(m interface{}) error { + return nil +} +func (s *stream) RecvMsg(m interface{}) error { + return nil +} + +// clientStream defines the interface a client stream has to satisfy. +type clientStream struct { + stream +} + +func (c *clientStream) Header() (metadata.MD, error) { + return nil, nil +} +func (c *clientStream) Trailer() metadata.MD { + return nil +} +func (c *clientStream) CloseSend() error { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types.go b/vendor/github.com/docker/docker/libcontainerd/types.go new file mode 100644 index 00000000..bb82fe40 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types.go @@ -0,0 +1,60 @@ +package libcontainerd + +import "io" + +// State constants used in state change reporting. +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateRestart = "restart" + StateRestore = "restore" + StateStartProcess = "start-process" + StateExitProcess = "exit-process" + StateOOM = "oom" // fake state + stateLive = "live" +) + +// StateInfo describes the new state a container has entered. +type StateInfo struct { // FIXME: event? + State string + Pid uint32 + ExitCode uint32 + ProcessID string + OOMKilled bool // TODO Windows containerd factor out +} + +// Backend defines callbacks that the client of the library needs to implement. +type Backend interface { + StateChanged(containerID string, state StateInfo) error + AttachStreams(processFriendlyName string, io IOPipe) error +} + +// Client provides access to containerd features. 
+type Client interface { + Create(containerID string, spec Spec, options ...CreateOption) error + Signal(containerID string, sig int) error + AddProcess(containerID, processFriendlyName string, process Process) error + Resize(containerID, processFriendlyName string, width, height int) error + Pause(containerID string) error + Resume(containerID string) error + Restore(containerID string, options ...CreateOption) error + Stats(containerID string) (*Stats, error) + GetPidsForContainer(containerID string) ([]int, error) + Summary(containerID string) ([]Summary, error) + UpdateResources(containerID string, resources Resources) error +} + +// CreateOption allows configuring parameters of container creation. +type CreateOption interface { + Apply(interface{}) error +} + +// IOPipe contains the stdio streams. +type IOPipe struct { + Stdin io.WriteCloser + Stdout io.Reader + Stderr io.Reader + Terminal bool // Whether stderr is connected on Windows +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types_linux.go b/vendor/github.com/docker/docker/libcontainerd/types_linux.go new file mode 100644 index 00000000..bee12d91 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_linux.go @@ -0,0 +1,47 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/specs/specs-go" +) + +// Spec is the base configuration for the container. It specifies platform +// independent configuration. This information must be included when the +// bundle is packaged for distribution. +type Spec specs.Spec + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. + Capabilities []string `json:"capabilities,omitempty"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []specs.Rlimit `json:"rlimits,omitempty"` + // ApparmorProfile specifies the apparmor profile for the container. + ApparmorProfile *string `json:"apparmorProfile,omitempty"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel *string `json:"selinuxLabel,omitempty"` +} + +// Stats contains the stats properties from containerd. +type Stats containerd.StatsResponse + +// Summary contains a container summary from containerd +type Summary struct{} + +// User specifies linux specific user and group information for the container's +// main process. +type User specs.User + +// Resources defines updatable container resource values. 
+type Resources containerd.UpdateResource diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go new file mode 100644 index 00000000..5b67244f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go @@ -0,0 +1,52 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/specs/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func hostIDFromMap(id uint32, mp []specs.IDMapping) int { + for _, m := range mp { + if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { + return int(m.HostID + id - m.ContainerID) + } + } + return 0 +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { + for _, r := range sr { + cr = append(cr, &containerd.Rlimit{ + Type: r.Type, + Hard: r.Hard, + Soft: r.Soft, + }) + } + return +} diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults_linux.go new file mode 100644 index 00000000..9ee75461 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_linux.go @@ -0,0 +1,210 @@ +package oci + +import ( + "os" + "runtime" + + "github.com/opencontainers/specs/specs-go" +) + +func sPtr(s string) *string { return &s } +func rPtr(r rune) *rune { return &r } +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +// DefaultSpec returns default oci spec used by docker. 
+func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + + s.Process.Capabilities = []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } + + s.Linux = specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/sched_debug", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.Namespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + Devices: []specs.Device{ + { + Type: "c", + Path: "/dev/zero", + Major: 1, + Minor: 5, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/null", + Major: 1, + Minor: 3, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/urandom", + Major: 1, + Minor: 9, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/random", + Major: 1, + Minor: 8, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/fuse", + Major: 10, + Minor: 229, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + }, + Resources: &specs.Resources{ + Devices: []specs.DeviceCgroup{ + { + Allow: false, + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(5), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(3), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(9), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(8), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(0), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(1), + Access: sPtr("rwm"), + }, + { + Allow: false, + Type: sPtr("c"), + Major: iPtr(10), + Minor: iPtr(229), + Access: sPtr("rwm"), + }, + }, + }, + } + + return s +} diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go deleted file mode 100644 index 7c239e00..00000000 --- a/vendor/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant 
defines the default host string used by docker on Windows -var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go index 0b099817..05aadbe7 100644 --- a/vendor/github.com/docker/docker/opts/opts.go +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -36,7 +36,7 @@ func (opts *ListOpts) String() string { return fmt.Sprintf("%v", []string((*opts.values))) } -// Set validates if needed the input value and adds it to the +// Set validates if needed the input value and add it to the // internal slice. func (opts *ListOpts) Set(value string) error { if opts.validator != nil { @@ -240,35 +240,3 @@ func ValidateLabel(val string) (string, error) { } return val, nil } - -// ValidateSysctl validates an sysctl and returns it. -func ValidateSysctl(val string) (string, error) { - validSysctlMap := map[string]bool{ - "kernel.msgmax": true, - "kernel.msgmnb": true, - "kernel.msgmni": true, - "kernel.sem": true, - "kernel.shmall": true, - "kernel.shmmax": true, - "kernel.shmmni": true, - "kernel.shm_rmid_forced": true, - } - validSysctlPrefixes := []string{ - "net.", - "fs.mqueue.", - } - arr := strings.Split(val, "=") - if len(arr) < 2 { - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) - } - if validSysctlMap[arr[0]] { - return val, nil - } - - for _, vp := range validSysctlPrefixes { - if strings.HasPrefix(arr[0], vp) { - return val, nil - } - } - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) -} diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go deleted file mode 100644 index ebe40c96..00000000 --- a/vendor/github.com/docker/docker/opts/opts_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package opts - -// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. -// @jhowardmsft, @swernli. -// -// On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP5. -// -// What was found that if the default host is "localhost", even if the client -// (and daemon as this is local) is not physically on a network, and the DNS -// cache is flushed (ipconfig /flushdns), then the client will pause for -// exactly one second when connecting to the daemon for calls. For example -// using docker run windowsservercore cmd, the CLI will send a create followed -// by an attach. You see the delay between the attach finishing and the attach -// being seen by the daemon. -// -// Here's some daemon debug logs with additional debug spew put in. The -// AfterWriteJSON log is the very last thing the daemon does as part of the -// create call. The POST /attach is the second CLI call. Notice the second -// time gap. -// -// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" -// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" -// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." -// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... -// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." -// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." 
-// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" -// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" -// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" -// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" -// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" -// ... 1 second gap here.... -// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" -// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" -// -// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, -// the Windows networking stack is supposed to resolve "localhost" internally, -// without hitting DNS, or even reading the hosts file (which is why localhost -// is commented out in the hosts file on Windows). -// -// We have validated that working around this using the actual IPv4 localhost -// address does not cause the delay. -// -// This does not occur with the docker client built with 1.4.3 on the same -// Windows build, regardless of whether the daemon is built using 1.5.1 -// or 1.4.3. It does not occur on Linux. We also verified we see the same thing -// on a cross-compiled Windows binary (from Linux). -// -// Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' -// explicitly. - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 -const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go new file mode 100644 index 00000000..6af771ff --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go @@ -0,0 +1,92 @@ +// Package aaparser is a convenience package interacting with `apparmor_parser`. +package aaparser + +import ( + "fmt" + "github.com/docker/containerd/subreaper/exec" + "path/filepath" + "strconv" + "strings" +) + +const ( + binary = "apparmor_parser" +) + +// GetVersion returns the major and minor version of apparmor_parser. +func GetVersion() (int, error) { + output, err := cmd("", "--version") + if err != nil { + return -1, err + } + + return parseVersion(output) +} + +// LoadProfile runs `apparmor_parser -r -W` on a specified apparmor profile to +// replace and write it to disk. +func LoadProfile(profilePath string) error { + _, err := cmd(filepath.Dir(profilePath), "-r", "-W", filepath.Base(profilePath)) + if err != nil { + return err + } + return nil +} + +// cmd runs `apparmor_parser` with the passed arguments. +func cmd(dir string, arg ...string) (string, error) { + c := exec.Command(binary, arg...) 
+ c.Dir = dir + + output, err := c.CombinedOutput() + if err != nil { + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) + } + + return string(output), nil +} + +// parseVersion takes the output from `apparmor_parser --version` and returns +// a representation of the {major, minor, patch} version as a single number of +// the form MMmmPPP {major, minor, patch}. +func parseVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd. + + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) + } + + // Default the versions to 0. + var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, err + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, err + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, err + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 5f8cd091..923e7ca1 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -11,7 +11,7 @@ import ( "io" "io/ioutil" "os" - "os/exec" + "github.com/docker/containerd/subreaper/exec" "path/filepath" "runtime" "strings" @@ -193,7 +193,7 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { } // CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { +func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) switch compression { @@ -425,19 +425,10 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } } - var errors []string for key, value := range hdr.Xattrs { if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - // We ignore errors here because not all graphdrivers support xattrs. - errors = append(errors, err.Error()) + return err } - - } - - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") } // There is no LChmod, so ignore mode for symlink. 
Also, this @@ -643,7 +634,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) if err := ta.addTarFile(filePath, relFilePath); err != nil { logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it + // if pipe is broken, stop writting tar stream to it if err == io.ErrClosedPipe { return err } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index 5c3a1be3..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build windows - -package archive - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - // no notion of file ownership mapping yet on Windows - return 0, 0, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index af94243f..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package archive - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - - // Don't look at size for dirs, its not a good measure of change - if oldStat.ModTime() != newStat.ModTime() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45..00000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/authorization/api.go b/vendor/github.com/docker/docker/pkg/authorization/api.go new file mode 100644 index 00000000..fc82c46b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/api.go @@ -0,0 +1,54 @@ +package authorization + +const ( + // AuthZApiRequest is the url for daemon request authorization + AuthZApiRequest = "AuthZPlugin.AuthZReq" + + // AuthZApiResponse is the url for daemon response authorization + AuthZApiResponse = "AuthZPlugin.AuthZRes" + + // AuthZApiImplements is the name of the interface all AuthZ plugins implement + AuthZApiImplements = "authz" +) + +// Request holds data required for authZ plugins +type Request struct { + // User holds the user extracted by AuthN mechanism + User string `json:"User,omitempty"` + + // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) + UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` + + // RequestMethod holds the HTTP method (GET/POST/PUT) + RequestMethod string `json:"RequestMethod,omitempty"` + + // RequestUri holds the full HTTP uri (e.g., /v1.21/version) + RequestURI string `json:"RequestUri,omitempty"` + + // RequestBody stores the raw request body sent to the docker daemon + RequestBody []byte `json:"RequestBody,omitempty"` + + // RequestHeaders stores the raw request headers sent to the docker daemon + RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` + + // ResponseStatusCode stores the status code returned from docker daemon + ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` + + // ResponseBody stores the raw response body sent from docker daemon + ResponseBody []byte 
`json:"ResponseBody,omitempty"` + + // ResponseHeaders stores the response headers sent to the docker daemon + ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` +} + +// Response represents authZ plugin response +type Response struct { + // Allow indicating whether the user is allowed or not + Allow bool `json:"Allow"` + + // Msg stores the authorization message + Msg string `json:"Msg,omitempty"` + + // Err stores a message in case there's an error + Err string `json:"Err,omitempty"` +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz.go b/vendor/github.com/docker/docker/pkg/authorization/authz.go new file mode 100644 index 00000000..f7039086 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/authz.go @@ -0,0 +1,165 @@ +package authorization + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" +) + +const maxBodySize = 1048576 // 1MB + +// NewCtx creates new authZ context, it is used to store authorization information related to a specific docker +// REST http session +// A context provides two method: +// Authenticate Request: +// Call authZ plugins with current REST request and AuthN response +// Request contains full HTTP packet sent to the docker daemon +// https://docs.docker.com/reference/api/docker_remote_api/ +// +// Authenticate Response: +// Call authZ plugins with full info about current REST request, REST response and AuthN response +// The response from this method may contains content that overrides the daemon response +// This allows authZ plugins to filter privileged content +// +// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results +// For response manipulation, the response from each plugin is piped between plugins. 
Plugin execution order +// is determined according to daemon parameters +func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { + return &Ctx{ + plugins: authZPlugins, + user: user, + userAuthNMethod: userAuthNMethod, + requestMethod: requestMethod, + requestURI: requestURI, + } +} + +// Ctx stores a single request-response interaction context +type Ctx struct { + user string + userAuthNMethod string + requestMethod string + requestURI string + plugins []Plugin + // authReq stores the cached request object for the current transaction + authReq *Request +} + +// AuthZRequest authorizes the request to the docker daemon using authZ plugins +func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { + var body []byte + if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { + var err error + body, r.Body, err = drainBody(r.Body) + if err != nil { + return err + } + } + + var h bytes.Buffer + if err := r.Header.Write(&h); err != nil { + return err + } + + ctx.authReq = &Request{ + User: ctx.user, + UserAuthNMethod: ctx.userAuthNMethod, + RequestMethod: ctx.requestMethod, + RequestURI: ctx.requestURI, + RequestBody: body, + RequestHeaders: headers(r.Header), + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZRequest(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return fmt.Errorf("authorization denied by plugin %s: %s", plugin.Name(), authRes.Msg) + } + } + + return nil +} + +// AuthZResponse authorizes and manipulates the response from docker daemon using authZ plugins +func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { + ctx.authReq.ResponseStatusCode = rm.StatusCode() + ctx.authReq.ResponseHeaders = headers(rm.Header()) + + if sendBody(ctx.requestURI, rm.Header()) { + ctx.authReq.ResponseBody = rm.RawBody() + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZResponse(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return fmt.Errorf("authorization denied by plugin %s: %s", plugin.Name(), authRes.Msg) + } + } + + rm.FlushAll() + + return nil +} + +// drainBody dumps the body (if its length is less than 1MB) without modifying the request state +func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { + bufReader := bufio.NewReaderSize(body, maxBodySize) + newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + data, err := bufReader.Peek(maxBodySize) + // Body size exceeds max body size + if err == nil { + logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) + return nil, newBody, nil + } + // Body size is less than maximum size + if err == io.EOF { + return data, newBody, nil + } + // Unknown error + return nil, newBody, err +} + +// sendBody returns true when request/response body should be sent to AuthZPlugin +func sendBody(url string, header http.Header) bool { + // Skip body for auth endpoint + if strings.HasSuffix(url, "/auth") { + return false + } + + // body is sent only for text or json messages + return header.Get("Content-Type") == "application/json" +} + +// headers returns a flattened version of the http headers excluding authorization 
+func headers(header http.Header) map[string]string { + v := make(map[string]string, 0) + for k, values := range header { + // Skip authorization headers + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { + continue + } + for _, val := range values { + v[k] = val + } + } + return v +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/plugin.go b/vendor/github.com/docker/docker/pkg/authorization/plugin.go new file mode 100644 index 00000000..1b65ac0a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/plugin.go @@ -0,0 +1,83 @@ +package authorization + +import "github.com/docker/docker/pkg/plugins" + +// Plugin allows third party plugins to authorize requests and responses +// in the context of docker API +type Plugin interface { + // Name returns the registered plugin name + Name() string + + // AuthZRequest authorizes the request from the client to the daemon + AuthZRequest(*Request) (*Response, error) + + // AuthZResponse authorizes the response from the daemon to the client + AuthZResponse(*Request) (*Response, error) +} + +// NewPlugins constructs and initializes the authorization plugins based on plugin names +func NewPlugins(names []string) []Plugin { + plugins := []Plugin{} + pluginsMap := make(map[string]struct{}) + for _, name := range names { + if _, ok := pluginsMap[name]; ok { + continue + } + pluginsMap[name] = struct{}{} + plugins = append(plugins, newAuthorizationPlugin(name)) + } + return plugins +} + +// authorizationPlugin is an internal adapter to docker plugin system +type authorizationPlugin struct { + plugin *plugins.Plugin + name string +} + +func newAuthorizationPlugin(name string) Plugin { + return &authorizationPlugin{name: name} +} + +func (a *authorizationPlugin) Name() string { + return a.name +} + +func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Client.Call(AuthZApiRequest, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Client.Call(AuthZApiResponse, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +// initPlugin initializes the authorization plugin if needed +func (a *authorizationPlugin) initPlugin() error { + // Lazy loading of plugins + if a.plugin == nil { + var err error + a.plugin, err = plugins.Get(a.name, AuthZApiImplements) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go new file mode 100644 index 00000000..245a0ef7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/response.go @@ -0,0 +1,203 @@ +package authorization + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "github.com/Sirupsen/logrus" + "net" + "net/http" +) + +// ResponseModifier allows authorization plugins to read and modify the content of the http.response +type ResponseModifier interface { + http.ResponseWriter + http.Flusher + http.CloseNotifier + + // RawBody returns the current http content + RawBody() []byte + + // RawHeaders returns the current content of the http headers + RawHeaders() ([]byte, 
diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go
new file mode 100644
index 00000000..245a0ef7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/authorization/response.go
@@ -0,0 +1,203 @@
+package authorization
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"github.com/Sirupsen/logrus"
+	"net"
+	"net/http"
+)
+
+// ResponseModifier allows authorization plugins to read and modify the content of the http.response
+type ResponseModifier interface {
+	http.ResponseWriter
+	http.Flusher
+	http.CloseNotifier
+
+	// RawBody returns the current http content
+	RawBody() []byte
+
+	// RawHeaders returns the current content of the http headers
+	RawHeaders() ([]byte, error)
+
+	// StatusCode returns the current status code
+	StatusCode() int
+
+	// OverrideBody replaces the body of the HTTP reply
+	OverrideBody(b []byte)
+
+	// OverrideHeader replaces the headers of the HTTP reply
+	OverrideHeader(b []byte) error
+
+	// OverrideStatusCode replaces the status code of the HTTP reply
+	OverrideStatusCode(statusCode int)
+
+	// FlushAll flushes all data to the HTTP response
+	FlushAll() error
+
+	// Hijacked indicates the response has been hijacked by the Docker daemon
+	Hijacked() bool
+}
+
+// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content
+func NewResponseModifier(rw http.ResponseWriter) ResponseModifier {
+	return &responseModifier{rw: rw, header: make(http.Header)}
+}
+
+// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore
+// the http request/response from docker daemon
+type responseModifier struct {
+	// The original response writer
+	rw http.ResponseWriter
+
+	r *http.Request
+
+	status int
+	// body holds the response body
+	body []byte
+	// header holds the response header
+	header http.Header
+	// statusCode holds the response status code
+	statusCode int
+	// hijacked indicates the request has been hijacked
+	hijacked bool
+}
+
+func (rm *responseModifier) Hijacked() bool {
+	return rm.hijacked
+}
+
+// WriteHeader stores the http status code
+func (rm *responseModifier) WriteHeader(s int) {
+
+	// Use original request if hijacked
+	if rm.hijacked {
+		rm.rw.WriteHeader(s)
+		return
+	}
+
+	rm.statusCode = s
+}
+
+// Header returns the internal http header
+func (rm *responseModifier) Header() http.Header {
+
+	// Use original header if hijacked
+	if rm.hijacked {
+		return rm.rw.Header()
+	}
+
+	return rm.header
+}
+
+// StatusCode returns the stored http status code
+func (rm *responseModifier) StatusCode() int {
+	return rm.statusCode
+}
+
+// OverrideBody replaces the body of the HTTP reply
+func (rm *responseModifier) OverrideBody(b []byte) {
+	rm.body = b
+}
+
+func (rm *responseModifier) OverrideStatusCode(statusCode int) {
+	rm.statusCode = statusCode
+}
+
+// OverrideHeader replaces the headers of the HTTP reply
+func (rm *responseModifier) OverrideHeader(b []byte) error {
+	header := http.Header{}
+	if err := json.Unmarshal(b, &header); err != nil {
+		return err
+	}
+	rm.header = header
+	return nil
+}
+
+// Write stores the byte array inside content
+func (rm *responseModifier) Write(b []byte) (int, error) {
+
+	if rm.hijacked {
+		return rm.rw.Write(b)
+	}
+
+	rm.body = append(rm.body, b...)
+	return len(b), nil
+}
+
+// RawBody returns the response body
+func (rm *responseModifier) RawBody() []byte {
+	return rm.body
+}
+
+// RawHeaders returns the stored response headers
+func (rm *responseModifier) RawHeaders() ([]byte, error) {
+	var b bytes.Buffer
+	if err := rm.header.Write(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// Hijack returns the internal connection of the wrapped http.ResponseWriter
+func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+
+	rm.hijacked = true
+	rm.FlushAll()
+
+	hijacker, ok := rm.rw.(http.Hijacker)
+	if !ok {
+		return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface")
+	}
+	return hijacker.Hijack()
+}
+
+// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter
+func (rm *responseModifier) CloseNotify() <-chan bool {
+	closeNotifier, ok := rm.rw.(http.CloseNotifier)
+	if !ok {
+		logrus.Errorf("Internal response writer doesn't support the CloseNotifier interface")
+		return nil
+	}
+	return closeNotifier.CloseNotify()
+}
+
+// Flush uses the internal flush API of the wrapped http.ResponseWriter
+func (rm *responseModifier) Flush() {
+	flusher, ok := rm.rw.(http.Flusher)
+	if !ok {
+		logrus.Errorf("Internal response writer doesn't support the Flusher interface")
+		return
+	}
+
+	rm.FlushAll()
+	flusher.Flush()
+}
+
+// FlushAll flushes all data to the HTTP response
+func (rm *responseModifier) FlushAll() error {
+	// Copy the status code
+	if rm.statusCode > 0 {
+		rm.rw.WriteHeader(rm.statusCode)
+	}
+
+	// Copy the header
+	for k, vv := range rm.header {
+		for _, v := range vv {
+			rm.rw.Header().Add(k, v)
+		}
+	}
+
+	var err error
+	if len(rm.body) > 0 {
+		// Write body
+		_, err = rm.rw.Write(rm.body)
+	}
+
+	// Clean previous data
+	rm.body = nil
+	rm.statusCode = 0
+	rm.header = http.Header{}
+	return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go
new file mode 100644
index 00000000..784d65d6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go
@@ -0,0 +1,49 @@
+package broadcaster
+
+import (
+	"io"
+	"sync"
+)
+
+// Unbuffered accumulates multiple io.WriteCloser by stream.
+type Unbuffered struct {
+	mu      sync.Mutex
+	writers []io.WriteCloser
+}
+
+// Add adds new io.WriteCloser.
+func (w *Unbuffered) Add(writer io.WriteCloser) {
+	w.mu.Lock()
+	w.writers = append(w.writers, writer)
+	w.mu.Unlock()
+}
+
+// Write writes bytes to all writers. Failed writers will be evicted during
+// this call.
+func (w *Unbuffered) Write(p []byte) (n int, err error) {
+	w.mu.Lock()
+	var evict []int
+	for i, sw := range w.writers {
+		if n, err := sw.Write(p); err != nil || n != len(p) {
+			// On error, evict the writer
+			evict = append(evict, i)
+		}
+	}
+	for n, i := range evict {
+		w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...)
+	}
+	w.mu.Unlock()
+	return len(p), nil
+}
+
+// Clean closes and removes all writers. Last non-eol-terminated part of data
+// will be saved.
+func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go deleted file mode 100644 index 0a500ed5..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// chroot is not supported by Windows -func chroot(path string) error { - return nil -} - -func invokeUnpack(decompressedArchive io.ReadCloser, - dest string, - options *archive.TarOptions) error { - // Windows is different to Linux here because Windows does not support - // chroot. Hence there is no point sandboxing a chrooted process to - // do the unpack. We call inline instead within the daemon process. - return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go deleted file mode 100644 index 8e1830cb..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - - // Ensure it is a Windows-style volume path - dest = longpath.AddPrefix(dest) - - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") - if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) - } - - s, err := archive.UnpackLayer(dest, layer, nil) - os.RemoveAll(tmpDir) - if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) - } - - return s, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go deleted file mode 100644 index fa17c9bf..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go new file mode 100644 index 00000000..23befae6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. 
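Before the filenotify package declared just below, a usage sketch: New falls back from fs events to polling transparently, so callers only deal with the FileWatcher interface. The watched path is a hypothetical example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/filenotify"
)

func main() {
	// fs-event watcher when inotify is available, else the poller.
	w, err := filenotify.New()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// The watched path is a hypothetical example.
	if err := w.Add("/var/lib/rancher/conf/cloud-config.yml"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-w.Events():
			fmt.Println("event:", ev)
		case err := <-w.Errors():
			fmt.Println("error:", err)
		}
	}
}
```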
+package filenotify
+
+import "gopkg.in/fsnotify.v1"
+
+// FileWatcher is an interface for implementing file notification watchers
+type FileWatcher interface {
+	Events() <-chan fsnotify.Event
+	Errors() <-chan error
+	Add(name string) error
+	Remove(name string) error
+	Close() error
+}
+
+// New tries to use an fs-event watcher, and falls back to the poller if there is an error
+func New() (FileWatcher, error) {
+	if watcher, err := NewEventWatcher(); err == nil {
+		return watcher, nil
+	}
+	return NewPollingWatcher(), nil
+}
+
+// NewPollingWatcher returns a poll-based file watcher
+func NewPollingWatcher() FileWatcher {
+	return &filePoller{
+		events: make(chan fsnotify.Event),
+		errors: make(chan error),
+	}
+}
+
+// NewEventWatcher returns an fs-event based file watcher
+func NewEventWatcher() (FileWatcher, error) {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return nil, err
+	}
+	return &fsNotifyWatcher{watcher}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go
new file mode 100644
index 00000000..42038835
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go
@@ -0,0 +1,18 @@
+package filenotify
+
+import "gopkg.in/fsnotify.v1"
+
+// fsNotifyWatcher wraps the fsnotify package to satisfy the FileWatcher interface
+type fsNotifyWatcher struct {
+	*fsnotify.Watcher
+}
+
+// Events returns the fsnotify event channel receiver
+func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event {
+	return w.Watcher.Events
+}
+
+// Errors returns the fsnotify error channel receiver
+func (w *fsNotifyWatcher) Errors() <-chan error {
+	return w.Watcher.Errors
+}
diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go
new file mode 100644
index 00000000..52610853
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go
@@ -0,0 +1,204 @@
+package filenotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+
+	"gopkg.in/fsnotify.v1"
+)
+
+var (
+	// errPollerClosed is returned when the poller is closed
+	errPollerClosed = errors.New("poller is closed")
+	// errNoSuchWatch is returned when trying to remove a watch that doesn't exist
+	errNoSuchWatch = errors.New("watch does not exist")
+)
+
+// watchWaitTime is the time to wait between file poll loops
+const watchWaitTime = 200 * time.Millisecond
+
+// filePoller is used to poll files for changes, especially in cases where fsnotify
+// can't be run (e.g. when inotify handles are exhausted)
+// filePoller satisfies the FileWatcher interface
+type filePoller struct {
+	// watches is the list of files currently being polled, close the associated channel to stop the watch
+	watches map[string]chan struct{}
+	// events is the channel to listen to for watch events
+	events chan fsnotify.Event
+	// errors is the channel to listen to for watch errors
+	errors chan error
+	// mu locks the poller for modification
+	mu sync.Mutex
+	// closed is used to specify when the poller has already closed
+	closed bool
+}
+
+// Add adds a filename to the list of watches
+// once added the file is polled for changes in a separate goroutine
+func (w *filePoller) Add(name string) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.closed {
+		return errPollerClosed
+	}
+
+	f, err := os.Open(name)
+	if err != nil {
+		return err
+	}
+	fi, err := os.Stat(name)
+	if err != nil {
+		return err
+	}
+
+	if w.watches == nil {
+		w.watches = make(map[string]chan struct{})
+	}
+	if _, exists := w.watches[name]; exists {
+		return fmt.Errorf("watch exists")
+	}
+	chClose := make(chan struct{})
+	w.watches[name] = chClose
+
+	go w.watch(f, fi, chClose)
+	return nil
+}
+
+// Remove stops and removes watch with the specified name
+func (w *filePoller) Remove(name string) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.remove(name)
+}
+
+func (w *filePoller) remove(name string) error {
+	if w.closed {
+		return errPollerClosed
+	}
+
+	chClose, exists := w.watches[name]
+	if !exists {
+		return errNoSuchWatch
+	}
+	close(chClose)
+	delete(w.watches, name)
+	return nil
+}
+
+// Events returns the event channel
+// This is used for notifications on events about watched files
+func (w *filePoller) Events() <-chan fsnotify.Event {
+	return w.events
+}
+
+// Errors returns the errors channel
+// This is used for notifications about errors on watched files
+func (w *filePoller) Errors() <-chan error {
+	return w.errors
+}
+
+// Close closes the poller
+// All watches are stopped, removed, and the poller cannot be added to
+func (w *filePoller) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.closed {
+		return nil
+	}
+
+	// stop the watches before marking the poller closed, otherwise
+	// remove() would bail out with errPollerClosed and leak the channels
+	for name := range w.watches {
+		w.remove(name)
+		delete(w.watches, name)
+	}
+	w.closed = true
+	return nil
+}
+
+// sendEvent publishes the specified event to the events channel
+func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error {
+	select {
+	case w.events <- e:
+	case <-chClose:
+		return fmt.Errorf("closed")
+	}
+	return nil
+}
+
+// sendErr publishes the specified error to the errors channel
+func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error {
+	select {
+	case w.errors <- e:
+	case <-chClose:
+		return fmt.Errorf("closed")
+	}
+	return nil
+}
+
+// watch is responsible for polling the specified file for changes
+// upon finding changes to a file or errors, sendEvent/sendErr is called
+func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) {
+	defer f.Close()
+	for {
+		time.Sleep(watchWaitTime)
+		select {
+		case <-chClose:
+			logrus.Debugf("watch for %s closed", f.Name())
+			return
+		default:
+		}
+
+		fi, err := os.Stat(f.Name())
+		if err != nil {
+			// if we got an error here and lastFi is not set, we can presume that nothing has changed
+			// This should be safe since before `watch()` is called, a stat is performed; if there is any error, `watch` is not called
+			if lastFi == nil {
+				continue
+			}
+			// If it doesn't exist at this point, it must have been removed
+			// no need to send the error here since this is a valid operation
+			if os.IsNotExist(err) {
+				if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil {
+					return
+				}
+				lastFi = nil
+				continue
+			}
+			// at this point, send the error
+			if err := w.sendErr(err, chClose); err != nil {
+				return
+			}
+			continue
+		}
+
+		if lastFi == nil {
+			if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil {
+				return
+			}
+			lastFi = fi
+			continue
+		}
+
+		if fi.Mode() != lastFi.Mode() {
+			if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil {
+				return
+			}
+			lastFi = fi
+			continue
+		}
+
+		if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() {
+			if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil {
+				return
+			}
+			lastFi = fi
+			continue
+		}
+	}
}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
deleted file mode 100644
index 5ec21cac..00000000
--- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package fileutils
-
-// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
-// on Windows.
-func GetTotalUsedFds() int {
-	return -1
-}
diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go
index ded091f2..b4afcf91 100644
--- a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go
+++ b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go
@@ -6,7 +6,7 @@ import (
 	"net/http"
 	"net/url"
 	"os"
-	"os/exec"
+	"github.com/docker/containerd/subreaper/exec"
 	"path/filepath"
 	"strings"
 
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
deleted file mode 100644
index c9e3c937..00000000
--- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build windows
-
-package idtools
-
-import (
-	"os"
-
-	"github.com/docker/docker/pkg/system"
-)
-
-// Platforms such as Windows do not support the UID/GID concept. So make this
-// just a wrapper around system.MkdirAll.
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go index 4a4aaed0..2bef12a5 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -2,13 +2,12 @@ package idtools import ( "fmt" - "os/exec" + "github.com/docker/containerd/subreaper/exec" "path/filepath" "regexp" "sort" "strconv" "strings" - "sync" ) // add a user and/or group to Linux /etc/passwd, /etc/group using standard @@ -17,7 +16,6 @@ import ( // useradd -r -s /bin/false var ( - once sync.Once userCommand string cmdTemplates = map[string]string{ @@ -33,6 +31,15 @@ var ( userMod = "usermod" ) +func init() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } +} + func resolveBinary(binname string) (string, error) { binaryPath, err := exec.LookPath(binname) if err != nil { @@ -87,14 +94,7 @@ func AddNamespaceRangesUser(name string) (int, int, error) { } func addUser(userName string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) + if userCommand == "" { return fmt.Errorf("Cannot add user; no useradd/adduser binary found") } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go deleted file mode 100644 index 3d737b3e..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index 59bba962..fcaecc37 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -9,19 +9,12 @@ import ( // maxCap is the highest capacity to use in byte slices that buffer data. 
const maxCap = 1e6 -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - // blockThreshold is the minimum number of bytes in the buffer which will cause // a write to BytesPipe to block when allocating a new slice. const blockThreshold = 1e6 -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) -) +// ErrClosed is returned when Write is called on a closed BytesPipe. +var ErrClosed = errors.New("write to closed BytesPipe") // BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). // All written data may be read at most once. Also, BytesPipe allocates @@ -30,17 +23,22 @@ var ( type BytesPipe struct { mu sync.Mutex wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. + buf [][]byte // slice of byte-slices of buffered data + lastRead int // index in the first slice to a read point + bufLen int // length of data buffered over the slices + closeErr error // error to return from next Read. set to nil if not closed. } // NewBytesPipe creates new BytesPipe, initialized by specified slice. // If buf is nil, then it will be initialized with slice which cap is 64. // buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) +func NewBytesPipe(buf []byte) *BytesPipe { + if cap(buf) == 0 { + buf = make([]byte, 0, 64) + } + bp := &BytesPipe{ + buf: [][]byte{buf[:0]}, + } bp.wait = sync.NewCond(&bp.mu) return bp } @@ -49,31 +47,23 @@ func NewBytesPipe() *BytesPipe { // It can allocate new []byte slices in a process of writing. 
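As context for the Write implementation that follows, a short usage sketch of the post-revert API (this hunk changes NewBytesPipe back to taking an initial buffer; nil gets a 64-byte default):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// nil buf: the pipe allocates its own 64-byte starting slice.
	bp := ioutils.NewBytesPipe(nil)

	go func() {
		bp.Write([]byte("hello, pipe")) // blocks only once blockThreshold is exceeded
		bp.Close()
	}()

	buf := make([]byte, 8)
	for {
		n, err := bp.Read(buf) // buffered data can be read at most once
		fmt.Print(string(buf[:n]))
		if err != nil {
			break // the close error surfaces after the buffer drains
		}
	}
}
```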
func (bp *BytesPipe) Write(p []byte) (int, error) { bp.mu.Lock() - + defer bp.mu.Unlock() written := 0 loop0: for { if bp.closeErr != nil { - bp.mu.Unlock() return written, ErrClosed } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer + // write data to the last buffer b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n + // copy data to the current empty allocated area + n := copy(b[len(b):cap(b)], p) + // increment buffered data length bp.bufLen += n + // include written data in last buffer + bp.buf[len(bp.buf)-1] = b[:len(b)+n] - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } + written += n // if there was enough room to write all then break if len(p) == n { @@ -83,7 +73,7 @@ loop0: // more data: write to the next slice p = p[n:] - // make sure the buffer doesn't grow too big from this write + // block if too much data is still in the buffer for bp.bufLen >= blockThreshold { bp.wait.Wait() if bp.closeErr != nil { @@ -91,15 +81,15 @@ loop0: } } - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 + // allocate slice that has twice the size of the last unless maximum reached + nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) if nextCap > maxCap { nextCap = maxCap } - bp.buf = append(bp.buf, getBuffer(nextCap)) + // add new byte slice to the buffers slice and continue writing + bp.buf = append(bp.buf, make([]byte, 0, nextCap)) } bp.wait.Broadcast() - bp.mu.Unlock() return written, nil } @@ -121,60 +111,46 @@ func (bp *BytesPipe) Close() error { return bp.CloseWithError(nil) } +func (bp *BytesPipe) len() int { + return bp.bufLen - bp.lastRead +} + // Read reads bytes from BytesPipe. // Data could be read only once. func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.mu.Lock() - if bp.bufLen == 0 { + defer bp.mu.Unlock() + if bp.len() == 0 { if bp.closeErr != nil { - bp.mu.Unlock() return 0, bp.closeErr } bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - bp.mu.Unlock() + if bp.len() == 0 && bp.closeErr != nil { return 0, bp.closeErr } } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + for { + read := copy(p, bp.buf[0][bp.lastRead:]) n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] + bp.lastRead += read + if bp.len() == 0 { + // we have read everything. reset to the beginning. + bp.lastRead = 0 + bp.bufLen -= len(bp.buf[0]) + bp.buf[0] = bp.buf[0][:0] + break } - + // break if everything was read if len(p) == read { break } - + // more buffered data and more asked. read from next slice. 
p = p[read:] + bp.lastRead = 0 + bp.bufLen -= len(bp.buf[0]) + bp.buf[0] = nil // throw away old slice + bp.buf = bp.buf[1:] // switch to next } - bp.wait.Broadcast() - bp.mu.Unlock() return } - -func returnBuffer(b *fixedBuffer) { - b.Reset() - pool := bufPools[b.Cap()] - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go deleted file mode 100644 index ca976707..00000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,75 +0,0 @@ -package ioutils - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. -func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/scheduler.go b/vendor/github.com/docker/docker/pkg/ioutils/scheduler.go new file mode 100644 index 00000000..3c88f29e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/scheduler.go @@ -0,0 +1,6 @@ +// +build !gccgo + +package ioutils + +func callSchedulerIfNecessary() { +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/vendor/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go new file mode 100644 index 00000000..c11d02b9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go @@ -0,0 +1,13 @@ +// +build gccgo + +package ioutils + +import ( + "runtime" +) + +func callSchedulerIfNecessary() { + //allow or force Go scheduler to switch context, without explicitly + //forcing this will make it hang when using gccgo implementation + runtime.Gosched() +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 100644 index c258e5fd..00000000 --- 
a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build windows
-
-package ioutils
-
-import (
-	"io/ioutil"
-
-	"github.com/docker/docker/pkg/longpath"
-)
-
-// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
-func TempDir(dir, prefix string) (string, error) {
-	tempDir, err := ioutil.TempDir(dir, prefix)
-	if err != nil {
-		return "", err
-	}
-	return longpath.AddPrefix(tempDir), nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md
new file mode 100644
index 00000000..e84a815c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/locker/README.md
@@ -0,0 +1,65 @@
+Locker
+=====
+
+locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks close to a sync.Mutex, however the user must provide a
+reference to use to refer to the underlying lock when locking and unlocking,
+and unlock may generate an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+
+
+## Usage
+
+```go
+package important
+
+import (
+	"sync"
+	"time"
+
+	"github.com/docker/docker/pkg/locker"
+)
+
+type important struct {
+	locks *locker.Locker
+	data  map[string]interface{}
+	mu    sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+
+	i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+	time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before doing anything with the underlying state); this ensures any
+other function that is dealing with the same name will block.
+
+When needing to modify the underlying data, use the global lock to ensure nothing
+else is modifying it at the same time.
+Since the name lock is already in place, no reads will occur while the modification
+is being performed.
+
diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go
new file mode 100644
index 00000000..0b22ddfa
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/locker/locker.go
@@ -0,0 +1,112 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation looks close to a sync.Mutex, however the user must provide a
+reference to use to refer to the underlying lock when locking and unlocking,
+and unlock may generate an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+*/
+package locker
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+)
+
+// ErrNoSuchLock is returned when the requested lock does not exist
+var ErrNoSuchLock = errors.New("no such lock")
+
+// Locker provides a locking mechanism based on the passed in reference name
+type Locker struct {
+	mu    sync.Mutex
+	locks map[string]*lockCtr
+}
+
+// lockCtr is used by Locker to represent a lock with a given name.
+type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff..00000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go deleted file mode 100644 index dab8a37e..00000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package mount - -func parseMountTable() ([]*Info, error) { - // Do NOT return an error! 
- return nil, nil -} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go new file mode 100644 index 00000000..1764fc28 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -0,0 +1,524 @@ +package namesgenerator + +import ( + "fmt" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "agitated", + "amazing", + "angry", + "awesome", + "backstabbing", + "berserk", + "big", + "boring", + "clever", + "cocky", + "compassionate", + "condescending", + "cranky", + "desperate", + "determined", + "distracted", + "dreamy", + "drunk", + "ecstatic", + "elated", + "elegant", + "evil", + "fervent", + "focused", + "furious", + "gigantic", + "gloomy", + "goofy", + "grave", + "happy", + "high", + "hopeful", + "hungry", + "insane", + "jolly", + "jovial", + "kickass", + "lonely", + "loving", + "mad", + "modest", + "naughty", + "nauseous", + "nostalgic", + "pedantic", + "pensive", + "prickly", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "sick", + "silly", + "sleepy", + "small", + "stoic", + "stupefied", + "suspicious", + "tender", + "thirsty", + "tiny", + "trusting", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. 
https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. 
+ "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. 
https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephon switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. 
https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. 
The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. 
https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. 
https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds created Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - a notable Indian engineer and recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. His birthday, 15 September, is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won the Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Roberta Williams did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - an American medical physicist and co-winner of the 1977 Nobel Prize in Physiology or Medicine for the development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel Prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname".
For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 9 will be appended to the name, e.g. `focused_turing3` +func GetRandomName(retry int) string { + rnd := random.Rand +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000..a21ba137 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,100 @@ +// +build !windows + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +}
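Since pkg/parsers/kernel is new in this vendor tree, a minimal usage sketch may help reviewers; this is a hypothetical example, not part of the vendored code:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/kernel"
)

func main() {
	// Parse a release string the same way GetKernelVersion does internally.
	v, err := kernel.ParseRelease("4.1.2-generic")
	if err != nil {
		panic(err)
	}

	// Require at least kernel 3.10 (an arbitrary threshold for illustration).
	minimum := kernel.VersionInfo{Kernel: 3, Major: 10, Minor: 0}
	if kernel.CompareKernelVersion(*v, minimum) < 0 {
		fmt.Printf("kernel %s is older than %s\n", v, &minimum)
	} else {
		fmt.Printf("kernel %s is recent enough\n", v)
	}
}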
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..7d12fcbd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is a passthrough for syscall.Utsname in order to make it portable to +// other platforms where it is not available. +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..79c66b32 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go new file mode 100644 index 00000000..0589cf2a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go @@ -0,0 +1,18 @@ +package operatingsystem + +import ( + "errors" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + // TODO: Implement OS detection + return "", errors.New("Cannot detect OS version") +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD, always returns false. +func IsContainerized() (bool, error) { + // TODO: Implement jail detection + return false, errors.New("Cannot detect if we are in container") +}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 00000000..e04a3499 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,77 @@ +// Package operatingsystem provides helper functions to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" + + // used by stateless systems like Clear Linux + altOsRelease = "/usr/lib/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + osReleaseFile, err := os.Open(etcOsRelease) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) + } + osReleaseFile, err = os.Open(altOsRelease) + if err != nil { + return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) + } + } + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if it has spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } + } + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { + return true, nil + } + } + return false, nil +}
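For orientation, a hedged sketch of how these helpers are typically called (illustrative only, not part of this diff):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/operatingsystem"
)

func main() {
	// On Linux this reads PRETTY_NAME from /etc/os-release (or the
	// stateless fallback /usr/lib/os-release).
	name, err := operatingsystem.GetOperatingSystem()
	if err != nil {
		name = "<unknown>"
	}

	// IsContainerized inspects /proc/1/cgroup to guess whether PID 1 is
	// running inside a container.
	inContainer, _ := operatingsystem.IsContainerized()
	fmt.Printf("OS: %s (containerized: %v)\n", name, inContainer)
}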
diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 00000000..acc89716 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different types +// of strings, such as hosts, unix addresses, tcp addresses, filters, and kernel or +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this will be parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +}
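A short, hypothetical example of the two parsers above, assuming only the APIs shown in this file:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers"
)

func main() {
	// "0,3-4,7" expands to the set {0, 3, 4, 7}; duplicates are allowed.
	cpus, err := parsers.ParseUintList("0,3-4,7")
	if err != nil {
		panic(err)
	}
	for cpu := range cpus {
		fmt.Println("cpu", cpu)
	}

	// Key/value options come back with surrounding whitespace trimmed.
	k, v, _ := parsers.ParseKeyValueOpt("storage-driver = overlay")
	fmt.Printf("%s=%s\n", k, v)
}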
diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go new file mode 100644 index 00000000..58cc4017 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go @@ -0,0 +1,50 @@ +// Package pidfile provides structure and helper functions to create and remove +// PID file. A PID file is usually a file used to store the process ID of a +// running process. +package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" +) + +// PIDFile is a file used to store the process ID of a running process. +type PIDFile struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PIDFile using the specified path. +func New(path string) (*PIDFile, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PIDFile{path: path}, nil +} + +// Remove removes the PIDFile. +func (file PIDFile) Remove() error { + if err := os.Remove(file.path); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go b/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go new file mode 100644 index 00000000..992987e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go @@ -0,0 +1,15 @@ +package platform + +import ( + "github.com/docker/containerd/subreaper/exec" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + cmd := exec.Command("uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return string(machine), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go new file mode 100644 index 00000000..30c58c78 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go @@ -0,0 +1,16 @@ +// Package platform provides helper functions to get the runtime architecture +// for different platforms. +package platform + +import ( + "syscall" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + utsname := &syscall.Utsname{} + if err := syscall.Uname(utsname); err != nil { + return "", err + } + return charsToString(utsname.Machine), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/platform.go b/vendor/github.com/docker/docker/pkg/platform/platform.go new file mode 100644 index 00000000..59e25295 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/platform.go @@ -0,0 +1,23 @@ +package platform + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +var ( + // Architecture holds the runtime architecture of the process. + Architecture string + // OSType holds the runtime operating system type (Linux, …) of the process. + OSType string +) + +func init() { + var err error + Architecture, err = runtimeArchitecture() + if err != nil { + logrus.Errorf("Could not read system architecture info: %v", err) + } + OSType = runtime.GOOS +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go new file mode 100644 index 00000000..5dcbadfd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go @@ -0,0 +1,18 @@ +// +build linux,386 linux,amd64 linux,arm64 +// see golang's sources src/syscall/ztypes_linux_*.go that use int8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of signed int8 +func charsToString(ca [65]int8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = uint8(ca[lens]) + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go new file mode 100644 index 00000000..c9875cf6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go @@ -0,0 +1,18 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x
+// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of unsigned uint8 +func charsToString(ca [65]uint8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = ca[lens] + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go index b83b5ae6..4f270a40 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -67,7 +67,7 @@ type Plugin struct { // error produced by activation activateErr error - // specifies if the activation sequence is completed (not if it is sucessful or not) + // specifies if the activation sequence is completed (not if it is successful or not) activated bool // wait for activation to finish activateWait *sync.Cond diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go new file mode 100644 index 00000000..09364617 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -0,0 +1,111 @@ +package pubsub + +import ( + "sync" + "time" +) + +var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} + +// NewPublisher
creates a new pub/sub publisher to broadcast messages. +// The duration is used as the send timeout so as not to block the publisher publishing +// messages to other clients if one client is slow or unresponsive. +// The buffer is used when creating new channels for subscribers. +func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { + return &Publisher{ + buffer: buffer, + timeout: publishTimeout, + subscribers: make(map[subscriber]topicFunc), + } +} + +type subscriber chan interface{} +type topicFunc func(v interface{}) bool + +// Publisher is a basic pub/sub structure. It allows sending events and +// subscribing to them, and can be safely used from multiple goroutines. +type Publisher struct { + m sync.RWMutex + buffer int + timeout time.Duration + subscribers map[subscriber]topicFunc +} + +// Len returns the number of subscribers for the publisher +func (p *Publisher) Len() int { + p.m.RLock() + i := len(p.subscribers) + p.m.RUnlock() + return i +} + +// Subscribe adds a new subscriber to the publisher returning the channel. +func (p *Publisher) Subscribe() chan interface{} { + return p.SubscribeTopic(nil) +} + +// SubscribeTopic adds a new subscriber that filters messages sent by a topic. +func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { + ch := make(chan interface{}, p.buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// Evict removes the specified subscriber from receiving any more messages. +func (p *Publisher) Evict(sub chan interface{}) { + p.m.Lock() + delete(p.subscribers, sub) + close(sub) + p.m.Unlock() +} + +// Publish sends the data in v to all subscribers currently registered with the publisher. +func (p *Publisher) Publish(v interface{}) { + p.m.RLock() + if len(p.subscribers) == 0 { + p.m.RUnlock() + return + } + + wg := wgPool.Get().(*sync.WaitGroup) + for sub, topic := range p.subscribers { + wg.Add(1) + go p.sendTopic(sub, topic, v, wg) + } + wg.Wait() + wgPool.Put(wg) + p.m.RUnlock() +} + +// Close closes the channels to all subscribers registered with the publisher. +func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} + +func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { + defer wg.Done() + if topic != nil && !topic(v) { + return + } + + // send under a select so as not to block if the receiver is unavailable + if p.timeout > 0 { + select { + case sub <- v: + case <-time.After(p.timeout): + } + return + } + + select { + case sub <- v: + default: + } +}
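A minimal usage sketch for the vendored Publisher, assuming only the API above (not part of this diff):

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/pubsub"
)

func main() {
	// One-second send timeout, per-subscriber buffer of 8 messages.
	p := pubsub.NewPublisher(time.Second, 8)
	defer p.Close()

	sub := p.Subscribe()
	done := make(chan struct{})
	go func() {
		for msg := range sub {
			fmt.Println("received:", msg)
		}
		close(done)
	}()

	p.Publish("hello")   // delivered to sub's buffered channel
	p.Evict(sub)         // closes sub, ending the range loop above
	<-done
}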
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go b/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go index c7f797a5..a540810a 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_freebsd.go @@ -3,7 +3,7 @@ package reexec import ( - "os/exec" + "github.com/docker/containerd/subreaper/exec" ) // Self returns the path to the current process's binary. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go index 3c3a73a9..5f9d1394 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -3,7 +3,8 @@ package reexec import ( - "os/exec" + osExec "os/exec" + "github.com/docker/containerd/subreaper/exec" "syscall" ) @@ -19,10 +20,12 @@ func Self() string { // it is thus safe to delete or replace the on-disk binary (os.Args[0]). func Command(args ...string) *exec.Cmd { return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: syscall.SIGTERM, + Cmd: osExec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, }, } } diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go index ad4ea38e..7f1a99cf 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -3,7 +3,7 @@ package reexec import ( - "os/exec" + "github.com/docker/containerd/subreaper/exec" ) // Command is unsupported on operating systems apart from Linux and Windows. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go deleted file mode 100644 index 8d65e0ae..00000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which have Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go index c56671d9..78ecce63 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/reexec.go +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -3,8 +3,10 @@ package reexec import ( "fmt" "os" - "os/exec" + "path" "path/filepath" + + "github.com/docker/containerd/subreaper/exec" ) var registeredInitializers = make(map[string]func()) @@ -12,7 +14,7 @@ var registeredInitializers = make(map[string]func()) // Register adds an initialization func under the specified name func Register(name string, initializer func()) { if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registred under name %q", name)) + panic(fmt.Sprintf("reexec func already registered under name %q", name)) } registeredInitializers[name] = initializer @@ -22,6 +24,9 @@ func Register(name string, initializer func()) { // initialization function was called.
func Init() bool { initializer, exists := registeredInitializers[os.Args[0]] + if !exists { + initializer, exists = registeredInitializers[path.Base(os.Args[0])] + } if exists { initializer()
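For reviewers unfamiliar with reexec, a hypothetical sketch of the Register/Init pattern this change affects; the initializer name "my-helper" is invented for illustration:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Registered initializers are looked up by os.Args[0] in the child.
	reexec.Register("my-helper", func() {
		fmt.Println("running as re-exec helper")
		os.Exit(0)
	})
}

func main() {
	// With the change above, Init also falls back to path.Base(os.Args[0]),
	// so a helper invoked via an absolute path still matches "my-helper".
	if reexec.Init() {
		return
	}

	cmd := reexec.Command("my-helper") // re-execs this binary as the helper
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}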
diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar.go b/vendor/github.com/docker/docker/pkg/registrar/registrar.go new file mode 100644 index 00000000..8910197f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/registrar/registrar.go @@ -0,0 +1,127 @@ +// Package registrar provides name registration. It reserves a name to a given key. +package registrar + +import ( + "errors" + "sync" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") + // ErrNoSuchKey is returned when trying to find the names for a key which is not known + ErrNoSuchKey = errors.New("provided key does not exist") +) + +// Registrar stores and indexes a list of keys and their registered names, as well as +// indexing names and the key that they are registered to. +// Names must be unique. +// Registrar is safe for concurrent access. +type Registrar struct { + idx map[string][]string + names map[string]string + mu sync.Mutex +} + +// NewRegistrar creates a new Registrar with an empty index +func NewRegistrar() *Registrar { + return &Registrar{ + idx: make(map[string][]string), + names: make(map[string]string), + } +} + +// Reserve registers a key to a name +// Reserve is idempotent +// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (r *Registrar) Reserve(name, key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if k, exists := r.names[name]; exists { + if k != key { + return ErrNameReserved + } + return nil + } + + r.idx[key] = append(r.idx[key], name) + r.names[name] = key + return nil +} + +// Release releases the reserved name +// Once released, a name can be reserved again +func (r *Registrar) Release(name string) { + r.mu.Lock() + defer r.mu.Unlock() + + key, exists := r.names[name] + if !exists { + return + } + + for i, n := range r.idx[key] { + if n != name { + continue + } + r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) + break + } + + delete(r.names, name) + + if len(r.idx[key]) == 0 { + delete(r.idx, key) + } +} + +// Delete removes all reservations for the passed in key. +// All names reserved to this key are released. +func (r *Registrar) Delete(key string) { + r.mu.Lock() + for _, name := range r.idx[key] { + delete(r.names, name) + } + delete(r.idx, key) + r.mu.Unlock() +} + +// GetNames lists all the reserved names for the given key +func (r *Registrar) GetNames(key string) ([]string, error) { + r.mu.Lock() + defer r.mu.Unlock() + + names, exists := r.idx[key] + if !exists { + return nil, ErrNoSuchKey + } + return names, nil +} + +// Get returns the key that the passed in name is reserved to +func (r *Registrar) Get(name string) (string, error) { + r.mu.Lock() + key, exists := r.names[name] + r.mu.Unlock() + + if !exists { + return "", ErrNameNotReserved + } + return key, nil +} + +// GetAll returns all registered names +func (r *Registrar) GetAll() map[string][]string { + out := make(map[string][]string) + + r.mu.Lock() + // copy index into out + for id, names := range r.idx { + out[id] = names + } + r.mu.Unlock() + return out +}
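A brief, hypothetical sketch of the Registrar API vendored above (names and keys invented for illustration):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/registrar"
)

func main() {
	names := registrar.NewRegistrar()

	// Reserving the same name for the same key is idempotent...
	_ = names.Reserve("web", "container-1")
	_ = names.Reserve("web", "container-1")

	// ...but a second key cannot take an already-reserved name.
	if err := names.Reserve("web", "container-2"); err == registrar.ErrNameReserved {
		fmt.Println("name already taken:", err)
	}

	key, _ := names.Get("web")
	fmt.Println("web is reserved to", key) // container-1

	names.Release("web") // the name can now be reserved again
}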
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go deleted file mode 100644 index 698cbf2d..00000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build windows - -package signal - -import ( - "syscall" -) - -// Signals used in api/client (no windows equivalent, use -// invalid signals so they don't get handled) -const ( - SIGCHLD = syscall.Signal(0xff) - SIGWINCH = syscall.Signal(0xff) - SIGPIPE = syscall.Signal(0xff) - // DefaultStopSignal is the syscall signal used to stop a container in windows systems. - DefaultStopSignal = "15" -) - -// SignalMap is a map of "supported" signals. As per the comment in GOLang's -// ztypes_windows.go: "More invented values for signals". Windows doesn't -// really support signals in any way, shape or form that Unix does. -// -// We have these so that docker kill can be used to gracefully (TERM) and -// forcibly (KILL) terminate a container on Windows. -var SignalMap = map[string]syscall.Signal{ - "KILL": syscall.SIGKILL, - "TERM": syscall.SIGTERM, -} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go index 8f67ece9..b37ae39f 100644 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -1,12 +1,10 @@ package stdcopy import ( - "bytes" "encoding/binary" "errors" "fmt" "io" - "sync" "github.com/Sirupsen/logrus" ) @@ -30,8 +28,6 @@ const ( startingBufLen = 32*1024 + stdWriterPrefixLen + 1 ) -var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} - // stdWriter is a wrapper of io.Writer with extra customized info. type stdWriter struct { io.Writer @@ -39,31 +35,28 @@ type stdWriter struct { } // Write sends the buffer to the underneath writer. -// It insert the prefix header before the buffer, +// It inserts the prefix header before the buffer, // so stdcopy.StdCopy knows where to multiplex the output. // It makes stdWriter implement io.Writer. -func (w *stdWriter) Write(p []byte) (n int, err error) { +func (w *stdWriter) Write(buf []byte) (n int, err error) { if w == nil || w.Writer == nil { return 0, errors.New("Writer not instantiated") } - if p == nil { + if buf == nil { return 0, nil } header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} - binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) - buf := bufPool.Get().(*bytes.Buffer) - buf.Write(header[:]) - buf.Write(p) + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(buf))) - n, err = w.Writer.Write(buf.Bytes()) + line := append(header[:], buf...) + + n, err = w.Writer.Write(line) n -= stdWriterPrefixLen + if n < 0 { n = 0 } - - buf.Reset() - bufPool.Put(buf) return }
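For context, a hedged sketch of the stream multiplexing this Write method implements; NewStdWriter, StdCopy, and the Stdout/Stderr constants live in the same package but outside this excerpt:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Multiplex stdout and stderr into one stream, as the daemon does for
	// container logs, then demultiplex it back out with StdCopy.
	var stream bytes.Buffer
	outW := stdcopy.NewStdWriter(&stream, stdcopy.Stdout)
	errW := stdcopy.NewStdWriter(&stream, stdcopy.Stderr)
	outW.Write([]byte("to stdout\n"))
	errW.Write([]byte("to stderr\n"))

	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &stream); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}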
diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go index 161184ff..02d2594e 100644 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -24,7 +24,7 @@ func IsShortID(id string) bool { // TruncateID returns a shorthand version of a string identifier for convenience. // A collision with other shorthands is very unlikely, but possible. // In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a langer prefix, or the full-length Id. +// will need to use a longer prefix, or the full-length Id. func TruncateID(id string) string { if i := strings.IndexRune(id, ':'); i >= 0 { id = id[i+1:] @@ -57,7 +57,7 @@ func generateID(crypto bool) string { } } -// GenerateRandomID returns an unique id. +// GenerateRandomID returns a unique id. func GenerateRandomID() string { return generateID(true) diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md new file mode 100644 index 00000000..b3e45457 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go new file mode 100644 index 00000000..41a0d2eb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go @@ -0,0 +1,87 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" + + "github.com/docker/docker/pkg/random" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[random.Rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + if len(s) <= maxlen { + return s + } + return s[:maxlen] +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false } + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and a open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go deleted file mode 100644 index 449fe564..00000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go +++ /dev/null @@ -1,155 +0,0 @@ -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/pkg/longpath" -) - -func toShort(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetShortPathName says we can reuse buffer - n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { - return "", err - } - } - return syscall.UTF16ToString(b), nil -} - -func toLong(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetLongPathName says we can reuse buffer - n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - b = b[:n] - return syscall.UTF16ToString(b), nil -} - -func evalSymlinks(path string) (string, error) { - path, err := walkSymlinks(path) - if err != nil { - return "", err - } - - p, err := toShort(path) - if err != nil { - return "", err - } - p, err = toLong(p) - if err != nil { - return "", err - } - // syscall.GetLongPathName does not change the case of the drive letter, - // but the result of EvalSymlinks must be unique, so we have - // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). - // Make drive letter upper case.
- if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { - p = string(p[0]+'A'-'a') + p[1:] - } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { - p = p[:3] + string(p[4]+'A'-'a') + p[5:] - } - return filepath.Clean(p), nil -} - -const utf8RuneSelf = 0x80 - -func walkSymlinks(path string) (string, error) { - const maxIter = 255 - originalPath := path - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("EvalSymlinks: too many links in " + originalPath) - } - - // A path beginning with `\\?\` represents the root, so automatically - // skip that part and begin processing the next segment. - if strings.HasPrefix(path, longpath.Prefix) { - b.WriteString(longpath.Prefix) - path = path[4:] - continue - } - - // find next path component, p - var i = -1 - for j, c := range path { - if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { - i = j - break - } - } - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - if b.Len() == 0 { - // must be absolute path - b.WriteRune(filepath.Separator) - } - continue - } - - // If this is the first segment after the long path prefix, accept the - // current segment as a volume root or UNC share and move on to the next. - if b.String() == longpath.Prefix { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - fi, err := os.Lstat(b.String() + p) - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { - b.WriteRune(filepath.Separator) - } - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(b.String() + p) - if err != nil { - return "", err - } - if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - return filepath.Clean(b.String()), nil -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/README.md b/vendor/github.com/docker/docker/pkg/sysinfo/README.md new file mode 100644 index 00000000..c1530cef --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go new file mode 100644 index 00000000..cbd00999 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go @@ -0,0 +1,128 @@ +package sysinfo + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. 
+type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +} + +// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil a parsing error occurred. 
+func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go new file mode 100644 index 00000000..22ae0d95 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go @@ -0,0 +1,7 @@ +package sysinfo + +// New returns an empty SysInfo for freebsd for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 00000000..41fb0d2b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,246 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +const ( + // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. + SeccompModeFilter = uintptr(2) +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts() + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. 
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. +func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warnf("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit.") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation.") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warnf("Your kernel does not support oom control.") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warnf("Your kernel does not support memory swappiness.") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warnf("Your kernel does not support kernel memory limit.") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. +func checkCgroupPids(quiet bool) cgroupPids { + _, err := cgroups.FindCgroupMountpoint("pids") + if err != nil { + if !quiet { + logrus.Warn(err) + } + return cgroupPids{} + } + + return cgroupPids{ + PidsLimit: true, + } +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go deleted file mode 100644 index 29458684..00000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build windows - -package system - -import ( - "syscall" - "time" -) - -//setCTime will set the create time on a file. On Windows, this requires -//calling SetFileTime and explicitly including the create time. 
-func setCTime(path string, ctime time.Time) error { - ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) - pathp, e := syscall.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := syscall.CreateFile(pathp, - syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, - syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer syscall.Close(h) - c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) - return syscall.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go deleted file mode 100644 index 04e2de78..00000000 --- a/vendor/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. -func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. 
-func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index 16823d55..00000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "path/filepath" - "regexp" - "strings" - "syscall" -) - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, perm os.FileMode) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = MkdirAll(path[0:j-1], perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = os.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 49e87eb4..00000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package system - -import ( - "os" -) - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that not strictly necessary when already in an OS specific module. 
-func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index d4664259..00000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c02..00000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go deleted file mode 100644 index 09e7f89f..00000000 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows - -package system - -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 39490c62..00000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. -type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. 
-func (s StatT) Mode() os.FileMode { - return s.mode -} - -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime -} - -// IsDir returns whether file is actually a directory. -func (s StatT) IsDir() bool { - return s.isDir -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index ef596f34..00000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,73 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = syscall.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv } - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} - -// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. -func CommandLineToArgv(commandLine string) ([]string, error) { - var argc int32 - - argsPtr, err := syscall.UTF16PtrFromString(commandLine) - if err != nil { - return nil, err - } - - argv, err := syscall.CommandLineToArgv(argsPtr, &argc) - if err != nil { - return nil, err - } - defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) - - newArgs := make([]string, argc) - for i, v := range (*argv)[:argc] { - newArgs[i] = string(syscall.UTF16ToString((*v)[:])) - } - - return newArgs, nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. - return ntuserApiset.Load() == nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go index 3d0146b0..c670fcd7 100644 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -7,7 +7,7 @@ import ( ) // Umask sets current process's file mode creation mask to newmask -// and returns oldmask. +// and returns oldmask. func Umask(newmask int) (oldmask int, err error) { return syscall.Umask(newmask), nil } diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index 13f1de17..00000000 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package system - -// Umask is not supported on the windows platform.
-func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go new file mode 100644 index 00000000..d580584d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -0,0 +1,66 @@ +// Package tailfile provides helper functions to read the last n lines of any +// ReadSeeker. +package tailfile + +import ( + "bytes" + "errors" + "io" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") + +// ErrNonPositiveLinesNumber is an error returned if the requested number of lines is not positive. +var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") + +// TailFile returns the last n lines of reader f (which could be a file). +func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(step, os.SEEK_END); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go index 154788db..4dc89bd4 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -28,7 +28,6 @@ import ( "fmt" "hash" "io" - "path" "strings" ) @@ -236,7 +235,7 @@ func (ts *tarSum) Read(buf []byte) (int, error) { } return n, err } - ts.currentFile = path.Clean(currentHeader.Name) + ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") if err := ts.encodeHeader(currentHeader); err != nil { return 0, err } diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go index 59dac5ba..a22cd9d1 100644 --- a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go @@ -11,7 +11,7 @@ import ( ) import "C" // Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with +// It is passthrough for syscall.Termios in order to make it portable with // other platforms where it is not available or handled differently.
type Termios syscall.Termios diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go index 750d7c3f..266039ba 100644 --- a/vendor/github.com/docker/docker/pkg/term/tc_other.go +++ b/vendor/github.com/docker/docker/pkg/term/tc_other.go @@ -1,6 +1,5 @@ // +build !windows // +build !linux !cgo -// +build !solaris !cgo package term diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go deleted file mode 100644 index c9139d0c..00000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include <termios.h> -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - /* - VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned - Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It - needs to be explicitly set to 1. - */ - newState.Cc[C.VMIN] = 1 - newState.Cc[C.VTIME] = 0 - - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go index 8f554847..11ed2093 100644 --- a/vendor/github.com/docker/docker/pkg/term/term.go +++ b/vendor/github.com/docker/docker/pkg/term/term.go @@ -10,6 +10,7 @@ import ( "os" "os/signal" "syscall" + "unsafe" ) var ( @@ -46,6 +47,27 @@ func GetFdInfo(in interface{}) (uintptr, bool) { return inFd, isTerminalIn } +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + // Skip errno = 0 + if err == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor.
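+// +// A minimal usage sketch, assuming stdin is a terminal: query the current size with GetWinsize and write it back unchanged: +// +//	if ws, err := GetWinsize(os.Stdin.Fd()); err == nil { +//		_ = SetWinsize(os.Stdin.Fd(), ws) // no-op resize, for illustration only +//	}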
+func SetWinsize(fd uintptr, ws *Winsize) error { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + // Skip errno = 0 + if err == 0 { + return nil + } + return err +} + // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { var termios Termios diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go deleted file mode 100644 index 112debbe..00000000 --- a/vendor/github.com/docker/docker/pkg/term/term_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build solaris - -package term - -import ( - "syscall" - "unsafe" -) - -/* -#include -#include -#include - -// Small wrapper to get rid of variadic args of ioctl() -int my_ioctl(int fd, int cmd, struct winsize *ws) { - return ioctl(fd, cmd, ws); -} -*/ -import "C" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go deleted file mode 100644 index ddf87a0e..00000000 --- a/vendor/github.com/docker/docker/pkg/term/term_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !solaris,!windows - -package term - -import ( - "syscall" - "unsafe" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go deleted file mode 100644 index cd21b5fc..00000000 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ /dev/null @@ -1,301 +0,0 @@ -// +build windows - -package term - -import ( - "io" - "os" - "os/signal" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - inMode, outMode uint32 - inHandle, outHandle syscall.Handle -} - -// Winsize is used for window size. 
-type Winsize struct { - Height uint16 - Width uint16 -} - -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 - disableNewlineAutoReturn = 0x0008 -) - -// usingNativeConsole is true if we are using the Windows native console -var usingNativeConsole bool - -// StdStreams returns the standard streams (stdin, stdout, stedrr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - switch { - case os.Getenv("ConEmuANSI") == "ON": - // The ConEmu terminal emulates ANSI on output streams well. - return windows.ConEmuStreams() - case os.Getenv("MSYSTEM") != "": - // MSYS (mingw) does not emulate ANSI well. - return windows.ConsoleStreams() - default: - if useNativeConsole() { - usingNativeConsole = true - return os.Stdin, os.Stdout, os.Stderr - } - return windows.ConsoleStreams() - } -} - -// useNativeConsole determines if the docker client should use the built-in -// console which supports ANSI emulation, or fall-back to the golang emulator -// (github.com/azure/go-ansiterm). -func useNativeConsole() bool { - osv := system.GetOSVersion() - - // Native console is not available before major version 10 - if osv.MajorVersion < 10 { - return false - } - - // Get the console modes. If this fails, we can't use the native console - state, err := getNativeConsole() - if err != nil { - return false - } - - // Probe the console to see if it can be enabled. - if nil != probeNativeConsole(state) { - return false - } - - // Environment variable override - if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" { - if e == "1" { - return true - } - return false - } - - // TODO Windows. The native emulator still has issues which - // mean it shouldn't be enabled for everyone. Change this next line to true - // to change the default to "enable if available". In the meantime, users - // can still try it out by using USE_NATIVE_CONSOLE env variable. 
- return false -} - -// getNativeConsole returns the console modes ('state') for the native Windows console -func getNativeConsole() (State, error) { - var ( - err error - state State - ) - - // Get the handle to stdout - if state.outHandle, err = syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE); err != nil { - return state, err - } - - // Get the console mode from the consoles stdout handle - if err = syscall.GetConsoleMode(state.outHandle, &state.outMode); err != nil { - return state, err - } - - // Get the handle to stdin - if state.inHandle, err = syscall.GetStdHandle(syscall.STD_INPUT_HANDLE); err != nil { - return state, err - } - - // Get the console mode from the consoles stdin handle - if err = syscall.GetConsoleMode(state.inHandle, &state.inMode); err != nil { - return state, err - } - - return state, nil -} - -// probeNativeConsole probes the console to determine if native can be supported, -func probeNativeConsole(state State) error { - if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil { - return err - } - defer winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) - - if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil { - return err - } - defer winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode) - - return nil -} - -// enableNativeConsole turns on native console mode -func enableNativeConsole(state State) error { - // First attempt both enableVirtualTerminalProcessing and disableNewlineAutoReturn - if err := winterm.SetConsoleMode(uintptr(state.outHandle), - state.outMode|(enableVirtualTerminalProcessing|disableNewlineAutoReturn)); err != nil { - - // That may fail, so fallback to trying just enableVirtualTerminalProcessing - if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil { - return err - } - } - - if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil { - winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) // restore out if we can - return err - } - - return nil -} - -// disableNativeConsole turns off native console mode -func disableNativeConsole(state *State) error { - // Try and restore both in an out before error checking. - errout := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) - errin := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode) - if errout != nil { - return errout - } - if errin != nil { - return errin - } - return nil -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. 
-func RestoreTerminal(fd uintptr, state *State) error { - if usingNativeConsole { - return disableNativeConsole(state) - } - return winterm.SetConsoleMode(fd, state.outMode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - if usingNativeConsole { - state, err := getNativeConsole() - if err != nil { - return nil, err - } - return &state, nil - } - - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{outMode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.inMode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. -func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.inMode - if usingNativeConsole { - if err := enableNativeConsole(*state); err != nil { - return nil, err - } - mode |= enableVirtualTerminalInput - } - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go deleted file mode 100644 index 5b91b783..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,257 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. 
-type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -func newAnsiReader(nFile int) *ansiReader { - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. -func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("Unexpected copy length encountered.") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell. - // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key. - // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s. - // <Ctrl>-S Suspends printing on the screen (does not stop the program). - // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key. - // <Ctrl>-E Quits current command and creates a core - - } - - // <Alt>+Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go deleted file mode 100644 index 9f3232c0..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build windows - -package windows - -import ( - "io/ioutil" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -func newAnsiWriter(nFile int) *ansiWriter { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go deleted file mode 100644 index 3036a046..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" - "io/ioutil" -) - -// ConEmuStreams returns prepared versions of console streams, -// for proper use in ConEmu terminal. -// The ConEmu terminal emulates ANSI on output streams well by default. -func ConEmuStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - if IsConsole(os.Stdin.Fd()) { - stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - stdOut = os.Stdout - stdErr = os.Stderr - - // WARNING (BEGIN): sourced from newAnsiWriter - - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - // WARNING (END): sourced from newAnsiWriter - - return stdIn, stdOut, stdErr -} - -// ConsoleStreams returns a wrapped version for each standard stream referencing a console, -// that handles ANSI character sequences. -func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - if IsConsole(os.Stdin.Fd()) { - stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if IsConsole(os.Stdout.Fd()) { - stdOut = newAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if IsConsole(os.Stderr.Fd()) { - stdErr = newAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return stdIn, stdOut, stdErr -} - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go deleted file mode 100644 index bf4c7b50..00000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. 
- -package windows diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go new file mode 100644 index 00000000..02610b8b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go @@ -0,0 +1,137 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when an ID or its prefix is not found in the index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our len(ID) is always 64 + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + idx.addID(id) + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +} + +// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addID(id); err != nil { + return err + } + return nil +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is returned. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is returned.
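+// +// A short usage sketch with hypothetical IDs (not taken from this repository): +// +//	idx := NewTruncIndex([]string{"abc123", "abd456"}) +//	id, err := idx.Get("abc") // "abc123", nil +//	_, err = idx.Get("ab")    // fails with ErrAmbiguousPrefix +//	_, err = idx.Get("zz")    // fails with ErrNotExist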
+func (idx *TruncIndex) Get(s string) (string, error) { + if s == "" { + return "", ErrEmptyPrefix + } + var ( + id string + ) + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return ErrAmbiguousPrefix{prefix: string(prefix)} + } + id = string(prefix) + return nil + } + + idx.RLock() + defer idx.RUnlock() + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", err + } + if id != "" { + return id, nil + } + return "", ErrNotExist +} + +// Iterate iterates over all stored IDs, and passes each of them to the given handler. +func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go index 44152873..1135a4c6 100644 --- a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -11,7 +11,7 @@ var ( validPrefixes = map[string][]string{ "url": {"http://", "https://"}, "git": {"git://", "github.com/", "git@"}, - "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://"}, } urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") ) diff --git a/vendor/github.com/docker/docker/pkg/useragent/README.md b/vendor/github.com/docker/docker/pkg/useragent/README.md new file mode 100644 index 00000000..d9cb367d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 00000000..1137db51 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is taken from the Name field, while +// the version is taken from the Version field. Several pieces of version information +// will be concatenated and separated by spaces. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0".
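+// +// Note that entries rejected by isValid are skipped silently: a VersionInfo +// whose Name or Version is empty or contains whitespace or '/' does not +// appear in the result, so AppendVersions("base", VersionInfo{"foo bar", "1.0"}) +// returns just "base".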
+func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/vendor/github.com/docker/docker/pkg/version/version.go b/vendor/github.com/docker/docker/pkg/version/version.go new file mode 100644 index 00000000..c001279f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/version/version.go @@ -0,0 +1,68 @@ +package version + +import ( + "strconv" + "strings" +) + +// Version provides utility methods for comparing versions. +type Version string + +func (v Version) compareTo(other Version) int { + var ( + currTab = strings.Split(string(v), ".") + otherTab = strings.Split(string(other), ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// String returns the version string +func (v Version) String() string { + return string(v) +} + +// LessThan checks if a version is less than another +func (v Version) LessThan(other Version) bool { + return v.compareTo(other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func (v Version) LessThanOrEqualTo(other Version) bool { + return v.compareTo(other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func (v Version) GreaterThan(other Version) bool { + return v.compareTo(other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func (v Version) GreaterThanOrEqualTo(other Version) bool { + return v.compareTo(other) >= 0 +} + +// Equal checks if a version is equal to another +func (v Version) Equal(other Version) bool { + return v.compareTo(other) == 0 +} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go new file mode 100644 index 00000000..51dfa5cf --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go @@ -0,0 +1,115 @@ +// +build linux + +package apparmor + +import ( + "bufio" + "io" + "os" + "path" + "strings" + + "github.com/docker/docker/pkg/aaparser" + "github.com/docker/docker/utils/templates" +) + +var ( + // profileDirectory is the file store for apparmor profiles and macros. + profileDirectory = "/etc/apparmor.d" + // defaultProfilePath is the default path for the apparmor profile to be saved. + defaultProfilePath = path.Join(profileDirectory, "docker") +) + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. + Version int +} + +// generateDefault creates an apparmor profile from ProfileData. 
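+// It renders baseTemplate with the profile name, the imports detected on the +// host (preferring the tunables/global and abstractions/base macros when they +// exist under profileDirectory), and the apparmor_parser version reported by +// aaparser.GetVersion, writing the generated profile to out.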
+func (p *profileData) generateDefault(out io.Writer) error { + compiled, err := templates.NewParse("apparmor_profile", baseTemplate) + if err != nil { + return err + } + + if macroExists("tunables/global") { + p.Imports = append(p.Imports, "#include <tunables/global>") + } else { + p.Imports = append(p.Imports, "@{PROC}=/proc/") + } + + if macroExists("abstractions/base") { + p.InnerImports = append(p.InnerImports, "#include <abstractions/base>") + } + + ver, err := aaparser.GetVersion() + if err != nil { + return err + } + p.Version = ver + + if err := compiled.Execute(out, p); err != nil { + return err + } + return nil +} + +// macroExists checks if the passed macro exists. +func macroExists(m string) bool { + _, err := os.Stat(path.Join(profileDirectory, m)) + return err == nil +} + +// InstallDefault generates a default profile and installs it in the +// ProfileDirectory with `apparmor_parser`. +func InstallDefault(name string) error { + // Make sure the path where they want to save the profile exists + if err := os.MkdirAll(profileDirectory, 0755); err != nil { + return err + } + + p := profileData{ + Name: name, + } + + f, err := os.OpenFile(defaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := p.generateDefault(f); err != nil { + f.Close() + return err + } + f.Close() + + if err := aaparser.LoadProfile(defaultProfilePath); err != nil { + return err + } + + return nil +} + +// IsLoaded checks if a passed profile has been loaded into the kernel. +func IsLoaded(name string) error { + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + return err + } + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err != nil { + return err + } + if strings.HasPrefix(p, name+" ") { + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/template.go b/vendor/github.com/docker/docker/profiles/apparmor/template.go new file mode 100644 index 00000000..ada33bf0 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/template.go @@ -0,0 +1,46 @@ +// +build linux + +package apparmor + +// baseTemplate defines the default apparmor profile for containers.
+const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) + # deny write to files not in /proc//** or /proc/sys/** + deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, + deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) + deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/kcore rwklx, + + deny mount, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/efi/efivars/** rwklx, + deny /sys/kernel/security/** rwklx, + +{{if ge .Version 208095}} + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer=docker-default, +{{end}} +} +` diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json new file mode 100755 index 00000000..28d564c1 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -0,0 +1,1623 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "architectures": [ + "SCMP_ARCH_X86_64", + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ], + "syscalls": [ + { + "name": "accept", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "accept4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "access", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "alarm", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "arch_prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "bind", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "brk", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "capget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "capset", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chmod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chroot", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_getres", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_nanosleep", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "connect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "copy_file_range", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "creat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup3", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { 
+ "name": "epoll_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_create1", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_ctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_ctl_old", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_pwait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_wait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_wait_old", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "eventfd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "eventfd2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "execve", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "execveat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "exit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "exit_group", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "faccessat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fadvise64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fadvise64_64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fallocate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fanotify_init", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fanotify_mark", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchmod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchmodat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchownat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fcntl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fcntl64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fdatasync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fgetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "flistxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "flock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fork", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fremovexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fsetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatfs64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fsync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ftruncate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ftruncate64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "futex", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "futimesat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getcpu", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getcwd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getdents", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getdents64", + "action": "SCMP_ACT_ALLOW", 
+ "args": [] + }, + { + "name": "getegid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getegid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "geteuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "geteuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgroups", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgroups32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getitimer", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpeername", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpgrp", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getppid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpriority", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrandom", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "get_robust_list", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrusage", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsockname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsockopt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "get_thread_area", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "gettid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "gettimeofday", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_add_watch", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_init", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_init1", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_rm_watch", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_cancel", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_destroy", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_getevents", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioprio_get", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioprio_set", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_setup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_submit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ipc", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "kill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + 
{ + "name": "lchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lgetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "link", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "linkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "listen", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "listxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "llistxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "_llseek", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lremovexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lseek", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lsetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lstat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lstat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "madvise", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "memfd_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mincore", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mkdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mkdirat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mknod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mknodat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlock2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlockall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mmap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mmap2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mprotect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_getsetattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_notify", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_timedreceive", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_timedsend", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_unlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mremap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgrcv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgsnd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munlock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munlockall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munmap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "nanosleep", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "newfstatat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "_newselect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "openat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pause", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + 
"valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "pipe", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pipe2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "poll", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ppoll", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pread64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "preadv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "prlimit64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pselect6", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pwrite64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pwritev", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "read", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readahead", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvfrom", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvmmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "remap_file_pages", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "removexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rename", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "renameat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "renameat2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "restart_syscall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rmdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigaction", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigpending", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigprocmask", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigqueueinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigreturn", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigsuspend", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigtimedwait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_tgsigqueueinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getaffinity", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getparam", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_get_priority_max", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_get_priority_min", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getscheduler", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_rr_get_interval", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setaffinity", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setparam", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setscheduler", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_yield", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "seccomp", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "select", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semop", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semtimedop", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "send", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendfile", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendfile64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendmmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendto", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setdomainname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgroups", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgroups32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sethostname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setitimer", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setpgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setpriority", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setregid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setregid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setreuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setreuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_robust_list", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setsid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setsockopt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_thread_area", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_tid_address", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setxattr", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmdt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shutdown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sigaltstack", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "signalfd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "signalfd4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sigreturn", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socket", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socketpair", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "splice", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "stat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "stat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "statfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "statfs64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "symlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "symlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sync_file_range", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "syncfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sysinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "syslog", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tee", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tgkill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "time", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_delete", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_settime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_getoverrun", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_settime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "times", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tkill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "truncate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "truncate64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ugetrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "umask", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "uname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "unlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "unlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utimensat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utimes", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vfork", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vhangup", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vmsplice", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "wait4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "waitid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "waitpid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "write", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "writev", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "modify_ldt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "breakpoint", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "cacheflush", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_tls", + "action": "SCMP_ACT_ALLOW", + "args": [] + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go new file mode 100644 index 00000000..bf565947 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/generate.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/profiles/seccomp" +) + +// saves the default seccomp profile as a json file so people can use it as a +// base for their own custom profiles +func main() { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + f := filepath.Join(wd, "default.json") + + // write the default profile to the file + b, err := json.MarshalIndent(seccomp.DefaultProfile, "", "\t") + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile(f, b, 0644); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go new file mode 100644 index 00000000..0718d840 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -0,0 +1,74 @@ +// +build linux + +package seccomp + +import ( + "encoding/json" + "fmt" + + "github.com/docker/engine-api/types" + "github.com/opencontainers/specs/specs-go" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile() (*specs.Seccomp, error) { + return setupSeccomp(DefaultProfile) +} + +// LoadProfile takes a file path and decodes the seccomp profile. 
+func LoadProfile(body string) (*specs.Seccomp, error) { + var config types.Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) + } + + return setupSeccomp(&config) +} + +func setupSeccomp(config *types.Seccomp) (newConfig *specs.Seccomp, err error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig = &specs.Seccomp{} + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) > 0 { + for _, arch := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(arch)) + } + } + + newConfig.DefaultAction = specs.Action(config.DefaultAction) + + // Loop through all syscall blocks and convert them to libcontainer format + for _, call := range config.Syscalls { + newCall := specs.Syscall{ + Name: call.Name, + Action: specs.Action(call.Action), + } + + // Loop through all the arguments of the syscall and convert them + for _, arg := range call.Args { + newArg := specs.Arg{ + Index: arg.Index, + Value: arg.Value, + ValueTwo: arg.ValueTwo, + Op: specs.Operator(arg.Op), + } + + newCall.Args = append(newCall.Args, newArg) + } + + newConfig.Syscalls = append(newConfig.Syscalls, newCall) + } + + return newConfig, nil +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go new file mode 100644 index 00000000..be93d780 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -0,0 +1,1654 @@ +// +build linux,seccomp + +package seccomp + +import ( + "syscall" + + "github.com/docker/engine-api/types" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +func arches() []types.Arch { + var native, err = libseccomp.GetNativeArch() + if err != nil { + return []types.Arch{} + } + var a = native.String() + switch a { + case "amd64": + return []types.Arch{types.ArchX86_64, types.ArchX86, types.ArchX32} + case "arm64": + return []types.Arch{types.ArchARM, types.ArchAARCH64} + case "mips64": + return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} + case "mips64n32": + return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} + case "mipsel64": + return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} + case "mipsel64n32": + return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} + default: + return []types.Arch{} + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. 
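+// Its DefaultAction is types.ActErrno, so any syscall that does not appear in +// the list below fails with an errno (typically EPERM) rather than killing the +// process; the entries therefore form a whitelist, not a blacklist.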
+var DefaultProfile = &types.Seccomp{ + DefaultAction: types.ActErrno, + Architectures: arches(), + Syscalls: []*types.Syscall{ + { + Name: "accept", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "accept4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "access", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "alarm", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "arch_prctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "bind", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "brk", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "capget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "capset", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chmod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chroot", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_getres", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_nanosleep", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clone", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + }, + { + Name: "close", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "connect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "copy_file_range", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "creat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup3", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_create1", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_ctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_ctl_old", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_pwait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_wait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_wait_old", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "eventfd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "eventfd2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "execve", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "execveat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "exit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "exit_group", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "faccessat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fadvise64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fadvise64_64", + Action: types.ActAllow, + Args: 
[]*types.Arg{}, + }, + { + Name: "fallocate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fanotify_init", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fanotify_mark", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchmod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchmodat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchownat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fcntl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fcntl64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fdatasync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fgetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "flistxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "flock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fork", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fremovexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fsetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatfs64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fsync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ftruncate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ftruncate64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "futex", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "futimesat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getcpu", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getcwd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getdents", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getdents64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getegid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getegid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "geteuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "geteuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgroups", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgroups32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getitimer", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpeername", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpgrp", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpid", + Action: types.ActAllow, + Args: 
[]*types.Arg{}, + }, + { + Name: "getppid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpriority", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrandom", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "get_robust_list", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrusage", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsockname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsockopt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "get_thread_area", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "gettid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "gettimeofday", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_add_watch", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_init", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_init1", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_rm_watch", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_cancel", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_destroy", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_getevents", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioprio_get", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioprio_set", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_setup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_submit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ipc", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "kill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lchown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lchown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lgetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "link", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "linkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "listen", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "listxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "llistxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "_llseek", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lremovexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lseek", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + 
Name: "lsetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lstat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lstat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "madvise", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "memfd_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mincore", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mkdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mkdirat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mknod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mknodat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlock2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlockall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mmap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mmap2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mprotect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_getsetattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_notify", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_open", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_timedreceive", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_timedsend", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_unlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mremap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgrcv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgsnd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munlock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munlockall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munmap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "nanosleep", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "newfstatat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "_newselect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "open", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "openat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pause", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0008, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "pipe", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pipe2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "poll", + Action: types.ActAllow, + Args: []*types.Arg{}, 
+ }, + { + Name: "ppoll", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "prctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pread64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "preadv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "prlimit64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pselect6", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pwrite64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pwritev", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "read", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readahead", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvfrom", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvmmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "remap_file_pages", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "removexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rename", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "renameat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "renameat2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "restart_syscall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rmdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigaction", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigpending", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigprocmask", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigqueueinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigreturn", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigsuspend", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigtimedwait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_tgsigqueueinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getaffinity", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getparam", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_get_priority_max", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_get_priority_min", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getscheduler", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_rr_get_interval", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setaffinity", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setparam", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setscheduler", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + 
Name: "sched_yield", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "seccomp", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "select", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semop", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semtimedop", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "send", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendfile", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendfile64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendmmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendto", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setdomainname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgroups", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgroups32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sethostname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setitimer", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setpgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setpriority", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setregid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setregid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setreuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setreuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_robust_list", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setsid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setsockopt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_thread_area", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_tid_address", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmctl", + Action: types.ActAllow, 
+ Args: []*types.Arg{}, + }, + { + Name: "shmdt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shutdown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sigaltstack", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "signalfd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "signalfd4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sigreturn", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "socket", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "socketpair", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "splice", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "stat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "stat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "statfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "statfs64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "symlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "symlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sync_file_range", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "syncfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sysinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "syslog", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tee", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tgkill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "time", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_delete", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_settime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_getoverrun", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_settime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "times", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tkill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "truncate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "truncate64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ugetrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "umask", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "uname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "unlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "unlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utimensat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utimes", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vfork", + Action: 
types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vhangup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vmsplice", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "wait4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "waitid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "waitpid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "write", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "writev", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + // i386 specific syscalls + { + Name: "modify_ldt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + // arm specific syscalls + { + Name: "breakpoint", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "cacheflush", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_tls", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + }, +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go new file mode 100644 index 00000000..64963292 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go @@ -0,0 +1,10 @@ +// +build linux,!seccomp + +package seccomp + +import "github.com/docker/engine-api/types" + +var ( + // DefaultProfile is a nil pointer on unsupported systems. + DefaultProfile *types.Seccomp +) diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go index 00a2a09e..91c5c2ae 100644 --- a/vendor/github.com/docker/docker/reference/store.go +++ b/vendor/github.com/docker/docker/reference/store.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "fmt" + "io/ioutil" "os" "path/filepath" "sort" @@ -11,7 +12,6 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/ioutils" ) var ( @@ -256,7 +256,18 @@ func (store *store) save() error { if err != nil { return err } - return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) + + tempFilePath := store.jsonPath + ".tmp" + + if err := ioutil.WriteFile(tempFilePath, jsonData, 0600); err != nil { + return err + } + + if err := os.Rename(tempFilePath, store.jsonPath); err != nil { + return err + } + + return nil } func (store *store) reload() error { diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go index 51302d11..50061329 100644 --- a/vendor/github.com/docker/docker/registry/config.go +++ b/vendor/github.com/docker/docker/registry/config.go @@ -206,7 +206,7 @@ func ValidateIndexName(val string) (string, error) { return val, nil } -func validateNoScheme(reposName string) error { +func validateNoSchema(reposName string) error { if strings.Contains(reposName, "://") { // It cannot contain a scheme! return ErrInvalidRepositoryName diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index 82bc4afe..00000000 --- a/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package registry - -import ( - "os" - "path/filepath" - "strings" -) - -// CertsDir is the directory where certificates are stored -var CertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. 
It will be passed in something *similar* to a URL such as -https:\index.docker.io\v1. Not all platforms support directory names -which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go index 3006e8ab..cdeca585 100644 --- a/vendor/github.com/docker/docker/registry/service.go +++ b/vendor/github.com/docker/docker/registry/service.go @@ -94,7 +94,7 @@ func splitReposSearchTerm(reposName string) (string, string) { // Search queries the public registry for images matching the specified // search terms, and returns the results. func (s *Service) Search(term string, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - if err := validateNoScheme(term); err != nil { + if err := validateNoSchema(term); err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/vendor/github.com/docker/docker/restartmanager/restartmanager.go new file mode 100644 index 00000000..0e844de2 --- /dev/null +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager.go @@ -0,0 +1,131 @@ +package restartmanager + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/engine-api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines an object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + failureCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartmanager based on a policy. +func New(policy container.RestartPolicy) RestartManager { + return &restartManager{policy: policy, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on active restartmanager") + } + + if exitCode != 0 { + rm.failureCount++ + } else { + rm.failureCount = 0 + } + + // if the container ran for more than 10s, regardless of status and policy, reset + // the timeout back to the default.
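+ // With defaultTimeout at 100ms and backoffMultiplier at 2, this yields restart + // delays of 100ms, 200ms, 400ms, and so on, doubling after every restart; a run + // of at least ten seconds starts the sequence over from 100ms.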
+ if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + if rm.timeout == 0 { + rm.timeout = defaultTimeout + } else { + rm.timeout *= backoffMultiplier + } + + var restart bool + switch { + case rm.policy.IsAlways(), rm.policy.IsUnlessStopped(): + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.failureCount <= max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/compare.go b/vendor/github.com/docker/docker/runconfig/compare.go new file mode 100644 index 00000000..61346aab --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/compare.go @@ -0,0 +1,61 @@ +package runconfig + +import "github.com/docker/engine-api/types/container" + +// Compare compares two Config structs. It does not compare the "Image" or "Hostname" fields. +// If OpenStdin is set on either config, they are treated as different. +func Compare(a, b *container.Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.Labels) != len(b.Labels) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for k, v := range a.Labels { + if v != b.Labels[k] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go new file mode 100644 index 00000000..9f3f9c5e --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config.go @@ -0,0 +1,71 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/volume" + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +// DecodeContainerConfig decodes a JSON encoded config into a ContainerConfigWrapper +// struct and returns both a Config and a HostConfig struct. +// Be aware that this function does not check whether the resulting structs are nil; +// that is the caller's responsibility. +func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := 
decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. + if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + + // Now validate all the volumes and binds + if err := validateVolumesAndBindSettings(w.Config, hc); err != nil { + return nil, nil, nil, err + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := ValidateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := ValidateIsolation(hc); err != nil { + return nil, nil, nil, err + } + return w.Config, hc, w.NetworkingConfig, nil +} + +// validateVolumesAndBindSettings validates each of the volumes and bind settings +// passed by the caller to ensure they are valid. +func validateVolumesAndBindSettings(c *container.Config, hc *container.HostConfig) error { + + // Ensure all volumes and binds are valid. + for spec := range c.Volumes { + if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("Invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("Invalid bind mount spec %q: %v", spec, err) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go new file mode 100644 index 00000000..e5902fb0 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig + +import ( + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. 
+// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + hc = SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go new file mode 100644 index 00000000..d3608576 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -0,0 +1,40 @@ +package runconfig + +import ( + "fmt" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links + ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. + ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. 
This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") +) diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go new file mode 100644 index 00000000..769cc9f5 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -0,0 +1,35 @@ +package runconfig + +import ( + "encoding/json" + "io" + + "github.com/docker/engine-api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. +func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go new file mode 100644 index 00000000..efc26112 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -0,0 +1,89 @@ +// +build !windows + +package runconfig + +import ( + "fmt" + "runtime" + "strings" + + "github.com/docker/engine-api/types/container" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. 
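+// (Editor's illustration, not part of the vendored file — a usage sketch for
+// DecodeHostConfig and SetDefaultNetModeIfBlank from hostconfig.go above; the
+// JSON literal is invented.)
+//
+//	hc, err := runconfig.DecodeHostConfig(strings.NewReader(`{"NetworkMode":""}`))
+//	if err == nil {
+//		fmt.Println(hc.NetworkMode) // "default" — getHostConfig applies
+//		// SetDefaultNetModeIfBlank before returning
+//	}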
+func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("--net: invalid net mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} + +// ValidateIsolation performs platform specific validation of +// isolation in the hostconfig structure. Linux only supports "default" +// which is LXC container isolation +func ValidateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/opts.go b/vendor/github.com/docker/docker/runconfig/opts/opts.go index b6d2e000..f919218d 100644 --- a/vendor/github.com/docker/docker/runconfig/opts/opts.go +++ b/vendor/github.com/docker/docker/runconfig/opts/opts.go @@ -47,7 +47,7 @@ func doesEnvExist(name string) bool { } // ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). +// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). 
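+// (Editor's note, not part of the vendored source: per the comment above,
+// ValidateExtraHost accepts e.g. "db:10.0.0.5", and because it splits only on
+// the first ":" an IPv6 mapping such as "db:::1" also parses, while
+// "db:not-an-ip" is rejected.)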
func ValidateExtraHost(val string) (string, error) { // allow for IPv6 addresses in extra hosts by only splitting on first ":" arr := strings.SplitN(val, ":", 2) diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse.go b/vendor/github.com/docker/docker/runconfig/opts/parse.go index 54b5d41e..6543b406 100644 --- a/vendor/github.com/docker/docker/runconfig/opts/parse.go +++ b/vendor/github.com/docker/docker/runconfig/opts/parse.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "path" + "regexp" "strconv" "strings" @@ -41,7 +42,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host flDevices = opts.NewListOpts(ValidateDevice) flUlimits = NewUlimitOpt(nil) - flSysctls = opts.NewMapOpts(nil, opts.ValidateSysctl) flPublish = opts.NewListOpts(nil) flExpose = opts.NewListOpts(nil) @@ -55,7 +55,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host flCapDrop = opts.NewListOpts(nil) flGroupAdd = opts.NewListOpts(nil) flSecurityOpt = opts.NewListOpts(nil) - flStorageOpt = opts.NewListOpts(nil) flLabelsFile = opts.NewListOpts(nil) flLoggingOpts = opts.NewListOpts(nil) flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to this container") @@ -77,14 +76,11 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCPUShares = cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flCPUPercent = cmd.Int64([]string{"-cpu-percent"}, 0, "CPU percent (Windows only)") flCPUPeriod = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") flCPUQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") flCpusetCpus = cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") flBlkioWeight = cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") - flIOMaxBandwidth = cmd.String([]string{"-io-maxbandwidth"}, "", "Maximum IO bandwidth limit for the system drive (Windows only)") - flIOMaxIOps = cmd.Uint64([]string{"-io-maxiops"}, 0, "Maximum IOps limit for the system drive (Windows only)") flSwappiness = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tune container memory swappiness (0 to 100)") flNetMode = cmd.String([]string{"-net"}, "default", "Connect a container to a network") flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 
92:d0:c6:0a:29:33)")
@@ -128,9 +124,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host
 	cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities")
 	cmd.Var(&flGroupAdd, []string{"-group-add"}, "Add additional groups to join")
 	cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options")
-	cmd.Var(&flStorageOpt, []string{"-storage-opt"}, "Set storage driver options per container")
 	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")
-	cmd.Var(flSysctls, []string{"-sysctl"}, "Sysctl options")
 	cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options")
 	cmd.Require(flag.Min, 1)
@@ -154,7 +148,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host
 	if *flStdin {
 		attachStdin = true
 	}
-	// If -a is not set, attach to stdout and stderr
+	// If -a is not set attach to the output stdio
 	if flAttach.Len() == 0 {
 		attachStdout = true
 		attachStderr = true
@@ -211,18 +205,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host
 		}
 	}
-	// TODO FIXME units.RAMInBytes should have a uint64 version
-	var maxIOBandwidth int64
-	if *flIOMaxBandwidth != "" {
-		maxIOBandwidth, err = units.RAMInBytes(*flIOMaxBandwidth)
-		if err != nil {
-			return nil, nil, nil, cmd, err
-		}
-		if maxIOBandwidth < 0 {
-			return nil, nil, nil, cmd, fmt.Errorf("invalid value: %s. Maximum IO Bandwidth must be positive", *flIOMaxBandwidth)
-		}
-	}
-
 	var binds []string
 	// add any bind targets to the list of container volumes
 	for bind := range flVolumes.GetMap() {
@@ -259,6 +241,15 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host
 	if *flEntrypoint != "" {
 		entrypoint = strslice.StrSlice{*flEntrypoint}
 	}
+	// Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant.
+	hostname := *flHostname
+	if hostname != "" {
+		// Linux hostname is limited to HOST_NAME_MAX=64, not including the terminating null byte.
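+		// (Editor's note, not part of the vendored source: under this check,
+		// e.g. "web-1.local" passes, while "-web", "a_b", or any name longer
+		// than 64 characters is rejected.)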
+ matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", hostname) + if len(hostname) > 64 || !matched { + return nil, nil, nil, cmd, fmt.Errorf("invalid hostname format for --hostname: %s", hostname) + } + } ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll()) if err != nil { @@ -346,11 +337,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host return nil, nil, nil, cmd, err } - storageOpts, err := parseStorageOpts(flStorageOpt.GetAll()) - if err != nil { - return nil, nil, nil, cmd, err - } - resources := container.Resources{ CgroupParent: *flCgroupParent, Memory: flMemory, @@ -359,7 +345,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host MemorySwappiness: flSwappiness, KernelMemory: KernelMemory, OomKillDisable: flOomKillDisable, - CPUPercent: *flCPUPercent, CPUShares: *flCPUShares, CPUPeriod: *flCPUPeriod, CpusetCpus: *flCpusetCpus, @@ -372,8 +357,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host BlkioDeviceWriteBps: flDeviceWriteBps.GetList(), BlkioDeviceReadIOps: flDeviceReadIOps.GetList(), BlkioDeviceWriteIOps: flDeviceWriteIOps.GetList(), - IOMaximumIOps: *flIOMaxIOps, - IOMaximumBandwidth: uint64(maxIOBandwidth), Ulimits: flUlimits.GetList(), Devices: deviceMappings, } @@ -432,7 +415,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host GroupAdd: flGroupAdd.GetAll(), RestartPolicy: restartPolicy, SecurityOpt: securityOpts, - StorageOpt: storageOpts, ReadonlyRootfs: *flReadonlyRootfs, LogConfig: container.LogConfig{Type: *flLoggingDriver, Config: loggingOpts}, VolumeDriver: *flVolumeDriver, @@ -440,7 +422,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host ShmSize: shmSize, Resources: resources, Tmpfs: tmpfs, - Sysctls: flSysctls.GetAll(), } // When allocating stdin in attached mode, close stdin at client disconnect @@ -484,8 +465,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.Host return config, hostConfig, networkingConfig, cmd, nil } -// reads a file of line terminated key=value pairs, and overrides any keys -// present in the file with additional pairs specified in the override parameter +// reads a file of line terminated key=value pairs and override that with override parameter func readKVStrings(files []string, override []string) ([]string, error) { envVariables := []string{} for _, ef := range files { @@ -551,20 +531,6 @@ func parseSecurityOpts(securityOpts []string) ([]string, error) { return securityOpts, nil } -// parses storage options per container into a map -func parseStorageOpts(storageOpts []string) (map[string]string, error) { - m := make(map[string]string) - for _, option := range storageOpts { - if strings.Contains(option, "=") { - opt := strings.SplitN(option, "=", 2) - m[opt[0]] = opt[1] - } else { - return nil, fmt.Errorf("Invalid storage option.") - } - } - return m, nil -} - // ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { p := container.RestartPolicy{} @@ -652,7 +618,7 @@ func ParseLink(val string) (string, string, error) { if len(arr) == 1 { return val, val, nil } - // This is kept because we can actually get a HostConfig with links + // This is kept because we can actually get an HostConfig with links // from an already created container 
and the format is not `foo:bar` // but `/foo:/c1/bar` if strings.HasPrefix(arr[0], "/") { diff --git a/vendor/github.com/docker/docker/runconfig/streams.go b/vendor/github.com/docker/docker/runconfig/streams.go new file mode 100644 index 00000000..548c7826 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/streams.go @@ -0,0 +1,109 @@ +package runconfig + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" +) + +// StreamConfig holds information about I/O streams managed together. +// +// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// streamConfig.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type StreamConfig struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewStreamConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. +func NewStreamConfig() *StreamConfig { + return &StreamConfig{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (streamConfig *StreamConfig) Stdout() *broadcaster.Unbuffered { + return streamConfig.stdout +} + +// Stderr returns the standard error in the configuration. +func (streamConfig *StreamConfig) Stderr() *broadcaster.Unbuffered { + return streamConfig.stderr +} + +// Stdin returns the standard input in the configuration. +func (streamConfig *StreamConfig) Stdin() io.ReadCloser { + return streamConfig.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { + return streamConfig.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe(nil) + streamConfig.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe(nil) + streamConfig.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (streamConfig *StreamConfig) NewInputPipes() { + streamConfig.stdin, streamConfig.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (streamConfig *StreamConfig) NewNopInputPipe() { + streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. 
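+// (Editor's illustration, not part of the vendored file — a sketch of the
+// broadcast behaviour described above; error handling elided.)
+//
+//	sc := runconfig.NewStreamConfig()
+//	r1, r2 := sc.StdoutPipe(), sc.StdoutPipe()
+//	sc.Stdout().Write([]byte("hello")) // both r1 and r2 now yield "hello"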
+func (streamConfig *StreamConfig) CloseStreams() error { + var errors []string + + if streamConfig.stdin != nil { + if err := streamConfig.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := streamConfig.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := streamConfig.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/trash.conf b/vendor/github.com/docker/docker/trash.conf new file mode 100644 index 00000000..6dbb226e --- /dev/null +++ b/vendor/github.com/docker/docker/trash.conf @@ -0,0 +1,66 @@ +github.com/Azure/go-ansiterm 70b2c90b260171e829f1ebd7c17f600c11858dbe +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 +github.com/Microsoft/go-winio v0.1.0 +github.com/Microsoft/hcsshim v0.1.0 +github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837 +github.com/Sirupsen/logrus v0.9.0 +github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/aws/aws-sdk-go v0.9.9 +github.com/boltdb/bolt v1.2.0 +github.com/cloudfoundry/gosigar 3ed7c74352dae6dc00bdc8c74045375352e3ec05 +github.com/codegangsta/cli 9fec0fad02befc9209347cc6d620e68e1b45f74d +github.com/coreos/etcd v2.2.0 +github.com/coreos/go-systemd v4 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/docker/containerd ebb6d97f443fdcbf7084e41356359c99421d93f5 https://github.com/ibuildthecloud/containerd.git +github.com/docker/distribution 467fc068d88aa6610691b7f1a677271a3fac4aac +github.com/docker/engine-api v0.3.3 +github.com/docker/go v1.5.1-1-1-gbaf439e +github.com/docker/go-connections v0.2.0 +github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 +github.com/docker/libkv c2aac5dbbaa5c872211edea7c0f32b3bd67e7410 +github.com/docker/libnetwork v0.7.0-rc.7 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/docker/notary docker-v1.11-3 +github.com/fluent/fluent-logger-golang v1.1.0 +github.com/go-check/check a625211d932a2a643d0d17352095f03fb7774663 https://github.com/cpuguy83/check.git +github.com/godbus/dbus v3 +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/context 14f550f51a +github.com/gorilla/mux e444e69cbd +github.com/hashicorp/consul v0.5.2 +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4 +github.com/hashicorp/serf 7151adcef72687bf95f451a2e0ba15cb19412bf2 +github.com/imdario/mergo 0.2.1 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.0 +github.com/mattn/go-sqlite3 v1.1.0 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 +github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f +github.com/mistifyio/go-zfs v2.1.1 +github.com/opencontainers/runc edc34c4a8c1e261b5ce926ff557ecde1aff19ce3 https://github.com/ibuildthecloud/runc.git +github.com/opencontainers/runtime-spec f955d90e70a98ddfb886bd930ffd076da9b67998 +github.com/opencontainers/specs f955d90e70a98ddfb886bd930ffd076da9b67998 +github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa +github.com/rcrowley/go-metrics 
eeba7bd0dd01ace6e690fa833b3f22aaec29af43 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/tchap/go-patricia v2.1.0 +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c +github.com/ugorji/go 5abd4e96a45c386928ed2ca2a7ef63e2533e18ec +github.com/vaughan0/go-ini a98ad7ee00ec53921f08832bc06ecf7fd600e6a1 +github.com/vbatts/tar-split v0.9.11 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +github.com/vishvananda/netlink 631962935bff4f3d20ff32a72e8944f6d2836a26 +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +golang.org/x/net 991d3e32f76f19ee6d9caadb3a22eae8d23315f7 https://github.com/golang/net.git +golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be https://github.com/golang/oauth2.git +golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git +google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 https://code.googlesource.com/google-api-go-client +google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https://code.googlesource.com/gocloud +google.golang.org/grpc ab0be5212fb225475f2087566eded7da5d727960 https://github.com/grpc/grpc-go.git +gopkg.in/fsnotify.v1 v1.2.0 diff --git a/vendor/github.com/docker/docker/utils/debug.go b/vendor/github.com/docker/docker/utils/debug.go new file mode 100644 index 00000000..d2038911 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/debug.go @@ -0,0 +1,26 @@ +package utils + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// EnableDebug sets the DEBUG env var to true +// and makes the logger to log at debug level. +func EnableDebug() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// DisableDebug sets the DEBUG env var to false +// and makes the logger to log at info level. +func DisableDebug() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsDebugEnabled checks whether the debug flag is set or not. +func IsDebugEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/docker/docker/utils/experimental.go b/vendor/github.com/docker/docker/utils/experimental.go new file mode 100644 index 00000000..ceed0cb3 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/experimental.go @@ -0,0 +1,9 @@ +// +build experimental + +package utils + +// ExperimentalBuild is a stub which always returns true for +// builds that include the "experimental" build tag +func ExperimentalBuild() bool { + return true +} diff --git a/vendor/github.com/docker/docker/utils/names.go b/vendor/github.com/docker/docker/utils/names.go new file mode 100644 index 00000000..8239c0de --- /dev/null +++ b/vendor/github.com/docker/docker/utils/names.go @@ -0,0 +1,12 @@ +package utils + +import "regexp" + +// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. +const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + +// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. +var RestrictedNamePattern = regexp.MustCompile(`^/?` + RestrictedNameChars + `+$`) + +// RestrictedVolumeNamePattern is a regular expression to validate volume names against the collection of restricted characters. 
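+// (Editor's note, not part of the vendored source: under these patterns a name
+// such as "my-volume_1" is valid, while "-data" or "data!" is rejected; the
+// container-name pattern additionally tolerates a single leading "/".)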
+var RestrictedVolumeNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
diff --git a/vendor/github.com/docker/docker/utils/process_unix.go b/vendor/github.com/docker/docker/utils/process_unix.go
new file mode 100644
index 00000000..bdb1b46b
--- /dev/null
+++ b/vendor/github.com/docker/docker/utils/process_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd
+
+package utils
+
+import (
+	"syscall"
+)
+
+// IsProcessAlive returns true if process with a given pid is running.
+func IsProcessAlive(pid int) bool {
+	err := syscall.Kill(pid, syscall.Signal(0))
+	if err == nil || err == syscall.EPERM {
+		return true
+	}
+
+	return false
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+	syscall.Kill(pid, syscall.SIGKILL)
+}
diff --git a/vendor/github.com/docker/docker/utils/stubs.go b/vendor/github.com/docker/docker/utils/stubs.go
new file mode 100644
index 00000000..8a496d39
--- /dev/null
+++ b/vendor/github.com/docker/docker/utils/stubs.go
@@ -0,0 +1,9 @@
+// +build !experimental
+
+package utils
+
+// ExperimentalBuild is a stub which always returns false for
+// builds that do not include the "experimental" build tag
+func ExperimentalBuild() bool {
+	return false
+}
diff --git a/vendor/github.com/docker/docker/utils/templates/templates.go b/vendor/github.com/docker/docker/utils/templates/templates.go
new file mode 100644
index 00000000..749da3d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/utils/templates/templates.go
@@ -0,0 +1,33 @@
+package templates
+
+import (
+	"encoding/json"
+	"strings"
+	"text/template"
+)
+
+// basicFunctions are the set of initial
+// functions provided to every template.
+var basicFunctions = template.FuncMap{
+	"json": func(v interface{}) string {
+		a, _ := json.Marshal(v)
+		return string(a)
+	},
+	"split": strings.Split,
+	"join":  strings.Join,
+	"title": strings.Title,
+	"lower": strings.ToLower,
+	"upper": strings.ToUpper,
+}
+
+// Parse creates a new anonymous template with the basic functions
+// and parses the given format.
+func Parse(format string) (*template.Template, error) {
+	return NewParse("", format)
+}
+
+// NewParse creates a new tagged template with the basic functions
+// and parses the given format.
+func NewParse(tag, format string) (*template.Template, error) {
+	return template.New(tag).Funcs(basicFunctions).Parse(format)
+}
diff --git a/vendor/github.com/docker/docker/utils/utils.go b/vendor/github.com/docker/docker/utils/utils.go
new file mode 100644
index 00000000..d3dd00ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/utils/utils.go
@@ -0,0 +1,87 @@
+package utils
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+var globalTestID string
+
+// TestDirectory creates a new temporary directory and returns its path.
+// The contents of the directory at path `templateDir` are copied into the
+// new directory.
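+// (Editor's illustration, not part of the vendored file: a test might call
+//
+//	dir, err := utils.TestDirectory("")
+//
+// to get a fresh temp directory named like "docker-test<id>-<TestCaller>-...",
+// optionally pre-populated from a template directory.)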
+func TestDirectory(templateDir string) (dir string, err error) { + if globalTestID == "" { + globalTestID = stringid.GenerateNonCryptoID()[:4] + } + prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = archive.CopyWithTar(templateDir, dir); err != nil { + return + } + } + return +} + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. + pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) 
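+			// (Editor's note: the append above splices out defaults[i] and shifts
+			// the next element into slot i; the decrement below makes the loop
+			// re-examine that slot instead of skipping it.)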
+ i-- + } + } + + return defaults +} diff --git a/vendor/github.com/docker/docker/volume/drivers/adapter.go b/vendor/github.com/docker/docker/volume/drivers/adapter.go new file mode 100644 index 00000000..e7ca3d50 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/adapter.go @@ -0,0 +1,106 @@ +package volumedrivers + +import ( + "fmt" + + "github.com/docker/docker/volume" +) + +type volumeDriverAdapter struct { + name string + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := a.proxy.Create(name, opts); err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name, + }, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { + ls, err := a.proxy.List() + if err != nil { + return nil, err + } + + var out []volume.Volume + for _, vp := range ls { + out = append(out, &volumeAdapter{ + proxy: a.proxy, + name: vp.Name, + driverName: a.name, + eMount: vp.Mountpoint, + }) + } + return out, nil +} + +func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { + v, err := a.proxy.Get(name) + if err != nil { + return nil, err + } + + // plugin may have returned no volume and no error + if v == nil { + return nil, fmt.Errorf("no such volume") + } + + return &volumeAdapter{ + proxy: a.proxy, + name: v.Name, + driverName: a.Name(), + eMount: v.Mountpoint, + }, nil +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + driverName string + eMount string // ephemeral host volume path +} + +type proxyVolume struct { + Name string + Mountpoint string +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) > 0 { + return a.eMount + } + m, _ := a.proxy.Path(a.name) + return m +} + +func (a *volumeAdapter) Mount() (string, error) { + var err error + a.eMount, err = a.proxy.Mount(a.name) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount() error { + return a.proxy.Unmount(a.name) +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint.go b/vendor/github.com/docker/docker/volume/drivers/extpoint.go new file mode 100644 index 00000000..a55da5af --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint.go @@ -0,0 +1,164 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver + +package volumedrivers + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/volume" +) + +// currently created by hand. generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver), driverLock: &locker.Locker{}} + +const extName = "VolumeDriver" + +// NewVolumeDriver returns a driver has the given name mapped on the given client. +func NewVolumeDriver(name string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name: name, proxy: proxy} +} + +type opts map[string]string +type list []*proxyVolume + +// volumeDriver defines the available functions that volume plugins must implement. 
+// This interface is only defined to generate the proxy objects. +// It's not intended to be public or reused. +type volumeDriver interface { + // Create a volume with the given name + Create(name string, opts opts) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name string) (err error) + // List lists all the volumes known to the driver + List() (volumes list, err error) + // Get retrieves the volume with the requested name + Get(name string) (volume *proxyVolume, err error) +} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex + driverLock *locker.Locker +} + +// Register associates the given driver to the given name, checking if +// the name is already associated +func Register(extension volume.Driver, name string) bool { + if name == "" { + return false + } + + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if exists { + return false + } + drivers.extensions[name] = extension + return true +} + +// Unregister dissociates the name from its driver, if the association exists. +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +// Lookup returns the driver associated with the given name. If a +// driver with the given name has not been registered it checks if +// there is a VolumeDriver plugin available with the given name. +func Lookup(name string) (volume.Driver, error) { + drivers.driverLock.Lock(name) + defer drivers.driverLock.Unlock(name) + + drivers.Lock() + ext, ok := drivers.extensions[name] + drivers.Unlock() + if ok { + return ext, nil + } + + pl, err := plugins.Get(name, extName) + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + drivers.Lock() + defer drivers.Unlock() + if ext, ok := drivers.extensions[name]; ok { + return ext, nil + } + + d := NewVolumeDriver(name, pl.Client) + drivers.extensions[name] = d + return d, nil +} + +// GetDriver returns a volume driver by its name. +// If the driver is empty, it looks for the local driver. +func GetDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return Lookup(name) +} + +// GetDriverList returns list of volume drivers registered. +// If no driver is registered, empty string list will be returned. 
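+// (Editor's illustration, not part of the vendored file — a sketch of the
+// registration flow above; "fake" and myDriver are invented.)
+//
+//	volumedrivers.Register(myDriver, "fake")  // true on first registration
+//	d, err := volumedrivers.GetDriver("fake") // returns myDriver
+//	// GetDriver("") falls back to volume.DefaultDriverName ("local")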
+func GetDriverList() []string { + var driverList []string + drivers.Lock() + for driverName := range drivers.extensions { + driverList = append(driverList, driverName) + } + drivers.Unlock() + return driverList +} + +// GetAllDrivers lists all the registered drivers +func GetAllDrivers() ([]volume.Driver, error) { + plugins, err := plugins.GetAll(extName) + if err != nil { + return nil, err + } + var ds []volume.Driver + + drivers.Lock() + defer drivers.Unlock() + + for _, d := range drivers.extensions { + ds = append(ds, d) + } + + for _, p := range plugins { + ext, ok := drivers.extensions[p.Name] + if ok { + continue + } + + ext = NewVolumeDriver(p.Name, p.Client) + drivers.extensions[p.Name] = ext + ds = append(ds, ext) + } + return ds, nil +} diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy.go b/vendor/github.com/docker/docker/volume/drivers/proxy.go new file mode 100644 index 00000000..5c7cdcb7 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/proxy.go @@ -0,0 +1,207 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import "errors" + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string + Opts opts +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string, opts opts) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + req.Opts = opts + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyMountRequest struct { + Name string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string +} + +type volumeDriverProxyUnmountResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Unmount(name string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Unmount", 
req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyListRequest struct { +} + +type volumeDriverProxyListResponse struct { + Volumes list + Err string +} + +func (pp *volumeDriverProxy) List() (volumes list, err error) { + var ( + req volumeDriverProxyListRequest + ret volumeDriverProxyListResponse + ) + + if err = pp.Call("VolumeDriver.List", req, &ret); err != nil { + return + } + + volumes = ret.Volumes + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyGetRequest struct { + Name string +} + +type volumeDriverProxyGetResponse struct { + Volume *proxyVolume + Err string +} + +func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { + var ( + req volumeDriverProxyGetRequest + ret volumeDriverProxyGetResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Get", req, &ret); err != nil { + return + } + + volume = ret.Volume + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/vendor/github.com/docker/docker/volume/local/local.go b/vendor/github.com/docker/docker/volume/local/local.go new file mode 100644 index 00000000..0bca731a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local.go @@ -0,0 +1,330 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the directory where the volume data is stored. +// It uses a very distinctive name to avoid collisions migrating data between +// Docker versions. +const ( + VolumeDataPathName = "_data" + volumesPathName = "volumes" +) + +var ( + // ErrNotFound is the typed error returned when the requested volume name can't be found + ErrNotFound = fmt.Errorf("volume not found") + // volumeNameRegex ensures the name assigned for the volume is valid. + // This name is used to create the bind directory, so we need to avoid characters that + // would make the path to escape the root directory. + volumeNameRegex = utils.RestrictedVolumeNamePattern +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +type activeMount struct { + count uint64 + mounted bool +} + +// New instantiates a new Root instance with the provided scope. Scope +// is the base path that the Root instance uses to store its +// volumes. The base path is created here if it does not exist. 
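+// (Editor's illustration, not part of the vendored file — how a caller might
+// instantiate this driver; the scope path is invented.)
+//
+//	r, err := local.New("/var/lib/docker", 0, 0)
+//	// volume data then lives under /var/lib/docker/volumes/<name>/_data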
+func New(scope string, rootUID, rootGID int) (*Root, error) {
+	rootDirectory := filepath.Join(scope, volumesPathName)
+
+	if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil {
+		return nil, err
+	}
+
+	r := &Root{
+		scope:   scope,
+		path:    rootDirectory,
+		volumes: make(map[string]*localVolume),
+		rootUID: rootUID,
+		rootGID: rootGID,
+	}
+
+	dirs, err := ioutil.ReadDir(rootDirectory)
+	if err != nil {
+		return nil, err
+	}
+
+	mountInfos, err := mount.GetMounts()
+	if err != nil {
+		logrus.Debugf("error looking up mounts for local volume cleanup: %v", err)
+	}
+
+	for _, d := range dirs {
+		if !d.IsDir() {
+			continue
+		}
+
+		name := filepath.Base(d.Name())
+		v := &localVolume{
+			driverName: r.Name(),
+			name:       name,
+			path:       r.DataPath(name),
+		}
+		r.volumes[name] = v
+		if b, err := ioutil.ReadFile(filepath.Join(name, "opts.json")); err == nil {
+			if err := json.Unmarshal(b, v.opts); err != nil {
+				return nil, err
+			}
+
+			// unmount anything that may still be mounted (for example, from an unclean shutdown)
+			for _, info := range mountInfos {
+				if info.Mountpoint == v.path {
+					mount.Unmount(v.path)
+					break
+				}
+			}
+		}
+	}
+
+	return r, nil
+}
+
+// Root implements the Driver interface for the volume package and
+// manages the creation/removal of volumes. It uses only standard vfs
+// commands to create/remove dirs within its provided scope.
+type Root struct {
+	m       sync.Mutex
+	scope   string
+	path    string
+	volumes map[string]*localVolume
+	rootUID int
+	rootGID int
+}
+
+// List lists all the volumes
+func (r *Root) List() ([]volume.Volume, error) {
+	var ls []volume.Volume
+	r.m.Lock()
+	for _, v := range r.volumes {
+		ls = append(ls, v)
+	}
+	r.m.Unlock()
+	return ls, nil
+}
+
+// DataPath returns the constructed path of this volume.
+func (r *Root) DataPath(volumeName string) string {
+	return filepath.Join(r.path, volumeName, VolumeDataPathName)
+}
+
+// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant.
+func (r *Root) Name() string {
+	return volume.DefaultDriverName
+}
+
+// Create creates a new volume.Volume with the provided name, creating
+// the underlying directory tree required for this volume in the
+// process.
+func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error) {
+	if err := r.validateName(name); err != nil {
+		return nil, err
+	}
+
+	r.m.Lock()
+	defer r.m.Unlock()
+
+	v, exists := r.volumes[name]
+	if exists {
+		return v, nil
+	}
+
+	path := r.DataPath(name)
+	if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil {
+		if os.IsExist(err) {
+			return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path))
+		}
+		return nil, err
+	}
+
+	var err error
+	defer func() {
+		if err != nil {
+			os.RemoveAll(filepath.Dir(path))
+		}
+	}()
+
+	v = &localVolume{
+		driverName: r.Name(),
+		name:       name,
+		path:       path,
+	}
+
+	if opts != nil {
+		if err = setOpts(v, opts); err != nil {
+			return nil, err
+		}
+		var b []byte
+		b, err = json.Marshal(v.opts)
+		if err != nil {
+			return nil, err
+		}
+		if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 0600); err != nil {
+			return nil, err
+		}
+	}
+
+	r.volumes[name] = v
+	return v, nil
+}
+
+// Remove removes the specified volume and all underlying data. If the
+// given volume does not belong to this driver, an error is returned.
+// The volume is reference counted; if all references are not released
+// then the volume is not removed.
+func (r *Root) Remove(v volume.Volume) error {
+	r.m.Lock()
+	defer r.m.Unlock()
+
+	lv, ok := v.(*localVolume)
+	if !ok {
+		return fmt.Errorf("unknown volume type %T", v)
+	}
+
+	realPath, err := filepath.EvalSymlinks(lv.path)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+		realPath = filepath.Dir(lv.path)
+	}
+
+	if !r.scopedPath(realPath) {
+		return fmt.Errorf("Unable to remove a directory out of the Docker root %s: %s", r.scope, realPath)
+	}
+
+	if err := removePath(realPath); err != nil {
+		return err
+	}
+
+	delete(r.volumes, lv.name)
+	return removePath(filepath.Dir(lv.path))
+}
+
+func removePath(path string) error {
+	if err := os.RemoveAll(path); err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// Get looks up the volume for the given name and returns it if found
+func (r *Root) Get(name string) (volume.Volume, error) {
+	r.m.Lock()
+	v, exists := r.volumes[name]
+	r.m.Unlock()
+	if !exists {
+		return nil, ErrNotFound
+	}
+	return v, nil
+}
+
+func (r *Root) validateName(name string) error {
+	if !volumeNameRegex.MatchString(name) {
+		return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed", name, utils.RestrictedNameChars)}
+	}
+	return nil
+}
+
+// localVolume implements the Volume interface from the volume package and
+// represents the volumes created by Root.
+type localVolume struct {
+	m         sync.Mutex
+	usedCount int
+	// unique name of the volume
+	name string
+	// path is the path on the host where the data lives
+	path string
+	// driverName is the name of the driver that created the volume.
+	driverName string
+	// opts is the parsed list of options used to create the volume
+	opts *optsConfig
+	// active refcounts the active mounts
+	active activeMount
+}
+
+// Name returns the name of the given Volume.
+func (v *localVolume) Name() string {
+	return v.name
+}
+
+// DriverName returns the driver that created the given Volume.
+func (v *localVolume) DriverName() string {
+	return v.driverName
+}
+
+// Path returns the data location.
+func (v *localVolume) Path() string {
+	return v.path
+}
+
+// Mount mounts the backing device on first use when mount options were
+// given, and returns the data location.
+func (v *localVolume) Mount() (string, error) {
+	v.m.Lock()
+	defer v.m.Unlock()
+	if v.opts != nil {
+		if !v.active.mounted {
+			if err := v.mount(); err != nil {
+				return "", err
+			}
+			v.active.mounted = true
+		}
+		v.active.count++
+	}
+	return v.path, nil
+}
+
+// Unmount releases one reference to the volume; when the last reference is
+// released, any mount created from the volume options is unmounted.
+func (v *localVolume) Unmount() error {
+	v.m.Lock()
+	defer v.m.Unlock()
+	if v.opts != nil {
+		v.active.count--
+		if v.active.count == 0 {
+			if err := mount.Unmount(v.path); err != nil {
+				v.active.count++
+				return err
+			}
+			v.active.mounted = false
+		}
+	}
+	return nil
+}
+
+func validateOpts(opts map[string]string) error {
+	for opt := range opts {
+		if !validOpts[opt] {
+			return validationError{fmt.Errorf("invalid option key: %q", opt)}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/volume/local/local_unix.go b/vendor/github.com/docker/docker/volume/local/local_unix.go
new file mode 100644
index 00000000..2e63777a
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/local/local_unix.go
@@ -0,0 +1,69 @@
+// +build linux freebsd
+
+// Package local provides the default implementation for volumes. It
+// is used to mount data volume containers and directories local to
+// the host server.
+package local + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/mount" +) + +var ( + oldVfsDir = filepath.Join("vfs", "dir") + + validOpts = map[string]bool{ + "type": true, // specify the filesystem type for mount, e.g. nfs + "o": true, // generic mount options + "device": true, // device to mount from + } +) + +type optsConfig struct { + MountType string + MountOpts string + MountDevice string +} + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) == 0 { + return nil + } + if err := validateOpts(opts); err != nil { + return err + } + + v.opts = &optsConfig{ + MountType: opts["type"], + MountOpts: opts["o"], + MountDevice: opts["device"], + } + return nil +} + +func (v *localVolume) mount() error { + if v.opts.MountDevice == "" { + return fmt.Errorf("missing device in volume options") + } + return mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, v.opts.MountOpts) +} diff --git a/vendor/github.com/docker/docker/volume/store/errors.go b/vendor/github.com/docker/docker/volume/store/errors.go new file mode 100644 index 00000000..7bdfa12b --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/errors.go @@ -0,0 +1,74 @@ +package store + +import ( + "errors" + "strings" +) + +var ( + // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container + errVolumeInUse = errors.New("volume is in use") + // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store + errNoSuchVolume = errors.New("no such volume") + // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform + errInvalidName = errors.New("volume name is not valid on this platform") + // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver + errNameConflict = errors.New("conflict: volume name must be unique") +) + +// OpErr is the error type returned by functions in the store package. It describes +// the operation, volume name, and error. +type OpErr struct { + // Err is the error that occurred during the operation. + Err error + // Op is the operation which caused the error, such as "create", or "list". + Op string + // Name is the name of the resource being requested for this op, typically the volume name or the driver name. + Name string + // Refs is the list of references associated with the resource. + Refs []string +} + +// Error satisfies the built-in error interface type. 
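+// (Editor's note, not part of the vendored source: the method below renders,
+// for example, &OpErr{Op: "remove", Name: "data", Err: errVolumeInUse,
+// Refs: []string{"c1"}} as "remove data: volume is in use - [c1]".)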
+func (e *OpErr) Error() string { + if e == nil { + return "" + } + s := e.Op + if e.Name != "" { + s = s + " " + e.Name + } + + s = s + ": " + e.Err.Error() + if len(e.Refs) > 0 { + s = s + " - " + "[" + strings.Join(e.Refs, ", ") + "]" + } + return s +} + +// IsInUse returns a boolean indicating whether the error indicates that a +// volume is in use +func IsInUse(err error) bool { + return isErr(err, errVolumeInUse) +} + +// IsNotExist returns a boolean indicating whether the error indicates that the volume does not exist +func IsNotExist(err error) bool { + return isErr(err, errNoSuchVolume) +} + +// IsNameConflict returns a boolean indicating whether the error indicates that a +// volume name is already taken +func IsNameConflict(err error) bool { + return isErr(err, errNameConflict) +} + +func isErr(err error, expected error) bool { + switch pe := err.(type) { + case nil: + return false + case *OpErr: + err = pe.Err + } + return err == expected +} diff --git a/vendor/github.com/docker/docker/volume/store/store.go b/vendor/github.com/docker/docker/volume/store/store.go new file mode 100644 index 00000000..b2ac9a8c --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store.go @@ -0,0 +1,506 @@ +package store + +import ( + "bytes" + "encoding/json" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +const ( + volumeDataDir = "volumes" + volumeBucketName = "volumes" +) + +type volumeMetadata struct { + Name string + Labels map[string]string +} + +type volumeWithLabels struct { + volume.Volume + labels map[string]string +} + +func (v volumeWithLabels) Labels() map[string]string { + return v.labels +} + +// New initializes a VolumeStore to keep +// reference counting of volumes in the system. 
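The IsInUse/IsNotExist/IsNameConflict helpers above unwrap OpErr so callers can branch on the typed sentinels without reaching into the struct. An illustrative removal flow (the function name and its tolerance policy are assumptions):

package main

import (
	"fmt"

	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/store"
)

// Illustrative only: delete a volume, tolerating "already gone" and
// surfacing the references that block removal.
func removeVolume(s *store.VolumeStore, v volume.Volume) error {
	err := s.Remove(v)
	switch {
	case err == nil, store.IsNotExist(err):
		return nil // removed, or already gone
	case store.IsInUse(err):
		// the OpErr carries the referencing IDs in Refs
		return fmt.Errorf("cannot remove %s: %v", v.Name(), err)
	default:
		return err
	}
}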
+func New(rootPath string) (*VolumeStore, error) {
+	vs := &VolumeStore{
+		locks:  &locker.Locker{},
+		names:  make(map[string]volume.Volume),
+		refs:   make(map[string][]string),
+		labels: make(map[string]map[string]string),
+	}
+
+	if rootPath != "" {
+		// initialize metadata store
+		volPath := filepath.Join(rootPath, volumeDataDir)
+		if err := os.MkdirAll(volPath, 0750); err != nil {
+			return nil, err
+		}
+
+		dbPath := filepath.Join(volPath, "metadata.db")
+
+		var err error
+		vs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
+		if err != nil {
+			return nil, err
+		}
+
+		// initialize volumes bucket
+		if err := vs.db.Update(func(tx *bolt.Tx) error {
+			if _, err := tx.CreateBucketIfNotExists([]byte(volumeBucketName)); err != nil {
+				return err
+			}
+
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+	}
+
+	return vs, nil
+}
+
+func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) {
+	s.globalLock.Lock()
+	v, exists := s.names[name]
+	s.globalLock.Unlock()
+	return v, exists
+}
+
+func (s *VolumeStore) setNamed(v volume.Volume, ref string) {
+	s.globalLock.Lock()
+	s.names[v.Name()] = v
+	if len(ref) > 0 {
+		s.refs[v.Name()] = append(s.refs[v.Name()], ref)
+	}
+	s.globalLock.Unlock()
+}
+
+func (s *VolumeStore) purge(name string) {
+	s.globalLock.Lock()
+	delete(s.names, name)
+	delete(s.refs, name)
+	delete(s.labels, name)
+	s.globalLock.Unlock()
+}
+
+// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts
+type VolumeStore struct {
+	locks      *locker.Locker
+	globalLock sync.Mutex
+	// names stores the volume name -> volume relationship.
+	// This is used for making lookups faster so we don't have to probe all drivers
+	names map[string]volume.Volume
+	// refs stores the volume name and the list of things referencing it
+	refs map[string][]string
+	// labels stores volume labels for each volume
+	labels map[string]map[string]string
+	db     *bolt.DB
+}
+
+// List proxies to all registered volume drivers to get the full list of volumes
+// If a driver returns a volume whose name conflicts with a volume from a different driver,
+// the first volume is chosen and the conflicting volume is dropped.
+func (s *VolumeStore) List() ([]volume.Volume, []string, error) {
+	vols, warnings, err := s.list()
+	if err != nil {
+		return nil, nil, &OpErr{Err: err, Op: "list"}
+	}
+	var out []volume.Volume
+
+	for _, v := range vols {
+		name := normaliseVolumeName(v.Name())
+
+		s.locks.Lock(name)
+		storedV, exists := s.getNamed(name)
+		// Note: it's not safe to populate the cache here because the volume may have been
+		// deleted before we acquire a lock on its name
+		if exists && storedV.DriverName() != v.DriverName() {
+			logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName())
+			s.locks.Unlock(name)
+			continue
+		}
+
+		out = append(out, v)
+		s.locks.Unlock(name)
+	}
+	return out, warnings, nil
+}
+
+// list goes through each volume driver and asks for its list of volumes.
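New above only opens the bolt metadata store when rootPath is non-empty, so a caller opts into persistence by passing its docker root. A usage sketch (the path is an assumption; RancherOS would pass its own root):

package main

import (
	"fmt"

	"github.com/docker/docker/volume/store"
)

// Illustrative only: construct the store and enumerate known volumes.
func main() {
	s, err := store.New("/var/lib/docker") // creates <root>/volumes/metadata.db
	if err != nil {
		panic(err)
	}
	vols, warnings, err := s.List()
	if err != nil {
		panic(err)
	}
	for _, w := range warnings {
		fmt.Println("warning:", w) // drivers that failed to list
	}
	for _, v := range vols {
		fmt.Printf("%s (driver %s)\n", v.Name(), v.DriverName())
	}
}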
+func (s *VolumeStore) list() ([]volume.Volume, []string, error) { + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, nil, err + } + var ( + ls []volume.Volume + warnings []string + ) + + type vols struct { + vols []volume.Volume + err error + driverName string + } + chVols := make(chan vols, len(drivers)) + + for _, vd := range drivers { + go func(d volume.Driver) { + vs, err := d.List() + if err != nil { + chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} + return + } + chVols <- vols{vols: vs} + }(vd) + } + + badDrivers := make(map[string]struct{}) + for i := 0; i < len(drivers); i++ { + vs := <-chVols + + if vs.err != nil { + warnings = append(warnings, vs.err.Error()) + badDrivers[vs.driverName] = struct{}{} + logrus.Warn(vs.err) + } + ls = append(ls, vs.vols...) + } + + if len(badDrivers) > 0 { + for _, v := range s.names { + if _, exists := badDrivers[v.DriverName()]; exists { + ls = append(ls, v) + } + } + } + return ls, warnings, nil +} + +// CreateWithRef creates a volume with the given name and driver and stores the ref +// This is just like Create() except we store the reference while holding the lock. +// This ensures there's no race between creating a volume and then storing a reference. +func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + + s.setNamed(v, ref) + return v, nil +} + +// Create creates a volume with the given name and driver. +func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + s.setNamed(v, "") + return v, nil +} + +// create asks the given driver to create a volume with the name/opts. +// If a volume with the name is already known, it will ask the stored driver for the volume. +// If the passed in driver name does not match the driver name which is stored for the given volume name, an error is returned. +// It is expected that callers of this function hold any necessary locks. 
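Create and CreateWithRef above differ only in whether a reference is recorded while the per-name lock is still held. A hedged usage sketch (the volume name, driver, and container-ID ref are examples, not from this patch):

package main

import (
	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/store"
)

// Illustrative only: create (or reuse) a volume and atomically record the
// container as a reference, so a concurrent Remove cannot win the race.
func createForContainer(s *store.VolumeStore, containerID string) (volume.Volume, error) {
	return s.CreateWithRef("app-data", "local", containerID, nil, nil)
}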
+func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + // Validate the name in a platform-specific manner + valid, err := volume.IsVolumeNameValid(name) + if err != nil { + return nil, err + } + if !valid { + return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} + } + + if v, exists := s.getNamed(name); exists { + if v.DriverName() != driverName && driverName != "" && driverName != volume.DefaultDriverName { + return nil, errNameConflict + } + return v, nil + } + + // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name + if driverName == "" { + v, _ := s.getVolume(name) + if v != nil { + return v, nil + } + } + + vd, err := volumedrivers.GetDriver(driverName) + + if err != nil { + return nil, &OpErr{Op: "create", Name: name, Err: err} + } + + logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) + + if v, _ := vd.Get(name); v != nil { + return v, nil + } + v, err := vd.Create(name, opts) + if err != nil { + return nil, err + } + s.globalLock.Lock() + s.labels[name] = labels + s.globalLock.Unlock() + + if s.db != nil { + metadata := &volumeMetadata{ + Name: name, + Labels: labels, + } + + volData, err := json.Marshal(metadata) + if err != nil { + return nil, err + } + + if err := s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(volumeBucketName)) + err := b.Put([]byte(name), volData) + return err + }); err != nil { + return nil, err + } + } + + return volumeWithLabels{v, labels}, nil +} + +// GetWithRef gets a volume with the given name from the passed in driver and stores the ref +// This is just like Get(), but we store the reference while holding the lock. +// This makes sure there are no races between checking for the existence of a volume and adding a reference for it +func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + v, err := vd.Get(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + s.setNamed(v, ref) + if labels, ok := s.labels[name]; ok { + return volumeWithLabels{v, labels}, nil + } + return v, nil +} + +// Get looks if a volume with the given name exists and returns it if so +func (s *VolumeStore) Get(name string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.getVolume(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + s.setNamed(v, "") + return v, nil +} + +// getVolume requests the volume, if the driver info is stored it just accesses that driver, +// if the driver is unknown it probes all drivers until it finds the first volume with that name. 
+// it is expected that callers of this function hold any necessary locks +func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { + labels := map[string]string{} + + if s.db != nil { + // get meta + if err := s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(volumeBucketName)) + data := b.Get([]byte(name)) + + if string(data) == "" { + return nil + } + + var meta volumeMetadata + buf := bytes.NewBuffer(data) + + if err := json.NewDecoder(buf).Decode(&meta); err != nil { + return err + } + labels = meta.Labels + + return nil + }); err != nil { + return nil, err + } + } + + logrus.Debugf("Getting volume reference for name: %s", name) + s.globalLock.Lock() + v, exists := s.names[name] + s.globalLock.Unlock() + if exists { + vd, err := volumedrivers.GetDriver(v.DriverName()) + if err != nil { + return nil, err + } + vol, err := vd.Get(name) + if err != nil { + return nil, err + } + return volumeWithLabels{vol, labels}, nil + } + + logrus.Debugf("Probing all drivers for volume with name: %s", name) + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, err + } + + for _, d := range drivers { + v, err := d.Get(name) + if err != nil { + continue + } + + return volumeWithLabels{v, labels}, nil + } + return nil, errNoSuchVolume +} + +// Remove removes the requested volume. A volume is not removed if it has any refs +func (s *VolumeStore) Remove(v volume.Volume) error { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + if refs, exists := s.refs[name]; exists && len(refs) > 0 { + return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: refs} + } + + vd, err := volumedrivers.GetDriver(v.DriverName()) + if err != nil { + return &OpErr{Err: err, Name: vd.Name(), Op: "remove"} + } + + logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) + vol := withoutLabels(v) + if err := vd.Remove(vol); err != nil { + return &OpErr{Err: err, Name: name, Op: "remove"} + } + + s.purge(name) + return nil +} + +// Dereference removes the specified reference to the volume +func (s *VolumeStore) Dereference(v volume.Volume, ref string) { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + s.globalLock.Lock() + defer s.globalLock.Unlock() + var refs []string + + for _, r := range s.refs[v.Name()] { + if r != ref { + refs = append(refs, r) + } + } + s.refs[v.Name()] = refs +} + +// Refs gets the current list of refs for the given volume +func (s *VolumeStore) Refs(v volume.Volume) []string { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + s.globalLock.Lock() + defer s.globalLock.Unlock() + refs, exists := s.refs[v.Name()] + if !exists { + return nil + } + + refsOut := make([]string, len(refs)) + copy(refsOut, refs) + return refsOut +} + +// FilterByDriver returns the available volumes filtered by driver name +func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) { + vd, err := volumedrivers.GetDriver(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + ls, err := vd.List() + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + return ls, nil +} + +// FilterByUsed returns the available volumes filtered by if they are in use or not. +// `used=true` returns only volumes that are being used, while `used=false` returns +// only volumes that are not being used. 
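Remove above refuses to delete a volume while references remain, so teardown code is expected to Dereference first and then check Refs. An illustrative release helper (the name and the keep-if-referenced policy are assumptions):

package main

import (
	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/store"
)

// Illustrative only: drop one container's reference and remove the volume
// only when nothing else still refers to it.
func releaseAndMaybeRemove(s *store.VolumeStore, v volume.Volume, containerID string) error {
	s.Dereference(v, containerID)
	if len(s.Refs(v)) > 0 {
		return nil // still referenced elsewhere; keep it
	}
	return s.Remove(v)
}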
+func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume { + return s.filter(vols, func(v volume.Volume) bool { + s.locks.Lock(v.Name()) + l := len(s.refs[v.Name()]) + s.locks.Unlock(v.Name()) + if (used && l > 0) || (!used && l == 0) { + return true + } + return false + }) +} + +// filterFunc defines a function to allow filter volumes in the store +type filterFunc func(vol volume.Volume) bool + +// filter returns the available volumes filtered by a filterFunc function +func (s *VolumeStore) filter(vols []volume.Volume, f filterFunc) []volume.Volume { + var ls []volume.Volume + for _, v := range vols { + if f(v) { + ls = append(ls, v) + } + } + return ls +} + +func withoutLabels(v volume.Volume) volume.Volume { + if vol, ok := v.(volumeWithLabels); ok { + return vol.Volume + } + + return v +} diff --git a/vendor/github.com/docker/docker/volume/store/store_unix.go b/vendor/github.com/docker/docker/volume/store/store_unix.go new file mode 100644 index 00000000..319c541d --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd + +package store + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. This is a no-op on Unix-like platforms +func normaliseVolumeName(name string) string { + return name +} diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go new file mode 100644 index 00000000..077cddfb --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume.go @@ -0,0 +1,133 @@ +package volume + +import ( + "fmt" + "os" + "runtime" + "strings" +) + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName string = "local" + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given id. + Create(name string, opts map[string]string) (Volume, error) + // Remove deletes the volume. + Remove(vol Volume) (err error) + // List lists all the volumes the driver has + List() ([]Volume, error) + // Get retrieves the volume with the requested name + Get(name string) (Volume, error) +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount() (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount() error +} + +// MountPoint is the intersection point between a volume and a container. It +// specifies which volume is to be used and where inside a container it should +// be mounted. 
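The Driver and Volume interfaces above are the full contract a volume backend has to satisfy. A minimal in-memory sketch to make the shape concrete (the "mem" driver is invented for illustration and manages no host state):

package main

import (
	"fmt"

	"github.com/docker/docker/volume"
)

type memVolume struct{ name string }

func (v *memVolume) Name() string           { return v.name }
func (v *memVolume) DriverName() string     { return "mem" }
func (v *memVolume) Path() string           { return "/tmp/mem/" + v.name }
func (v *memVolume) Mount() (string, error) { return v.Path(), nil }
func (v *memVolume) Unmount() error         { return nil }

type memDriver struct{ vols map[string]*memVolume }

// compile-time check that memDriver satisfies the Driver interface
var _ volume.Driver = (*memDriver)(nil)

func (d *memDriver) Name() string { return "mem" }

func (d *memDriver) Create(name string, opts map[string]string) (volume.Volume, error) {
	v := &memVolume{name: name}
	d.vols[name] = v
	return v, nil
}

func (d *memDriver) Remove(vol volume.Volume) error {
	delete(d.vols, vol.Name())
	return nil
}

func (d *memDriver) List() ([]volume.Volume, error) {
	out := make([]volume.Volume, 0, len(d.vols))
	for _, v := range d.vols {
		out = append(out, v)
	}
	return out, nil
}

func (d *memDriver) Get(name string) (volume.Volume, error) {
	if v, ok := d.vols[name]; ok {
		return v, nil
	}
	return nil, fmt.Errorf("no such volume: %s", name)
}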
+type MountPoint struct {
+	Source      string // Container host directory
+	Destination string // Inside the container
+	RW          bool   // True if writable
+	Name        string // Name set by user
+	Driver      string // Volume driver to use
+	Volume      Volume `json:"-"`
+
+	// Note Mode is not used on Windows
+	Mode string `json:"Relabel"` // Originally field was `Relabel`
+
+	// Note Propagation is not used on Windows
+	Propagation string // Mount propagation string
+	Named       bool   // specifies if the mountpoint was specified by name
+
+	// Specifies if data should be copied from the container before the first mount
+	// Use a pointer here so we can tell if the user set this value explicitly
+	// This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated
+	CopyData bool `json:"-"`
+}
+
+// Setup sets up a mount point by either mounting the volume if it is
+// configured, or creating the source directory if supplied.
+func (m *MountPoint) Setup() (string, error) {
+	if m.Volume != nil {
+		return m.Volume.Mount()
+	}
+	if len(m.Source) > 0 {
+		if _, err := os.Stat(m.Source); err != nil {
+			if !os.IsNotExist(err) {
+				return "", err
+			}
+			if runtime.GOOS != "windows" { // Windows does not have deprecation issues here
+				if err := os.MkdirAll(m.Source, 0755); err != nil {
+					return "", err
+				}
+			}
+		}
+		return m.Source, nil
+	}
+	return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
+}
+
+// Path returns the path of a volume in a mount point.
+func (m *MountPoint) Path() string {
+	if m.Volume != nil {
+		return m.Volume.Path()
+	}
+	return m.Source
+}
+
+// ParseVolumesFrom ensures that the supplied volumes-from is valid.
+func ParseVolumesFrom(spec string) (string, string, error) {
+	if len(spec) == 0 {
+		return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
+	}
+
+	specParts := strings.SplitN(spec, ":", 2)
+	id := specParts[0]
+	mode := "rw"
+
+	if len(specParts) == 2 {
+		mode = specParts[1]
+		if !ValidMountMode(mode) {
+			return "", "", errInvalidMode(mode)
+		}
+		// For now don't allow propagation properties while importing
+		// volumes from data container. These volumes will inherit
+		// the same propagation property as of the original volume
+		// in data container. This probably can be relaxed in future.
+ if HasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func errInvalidMode(mode string) error { + return fmt.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return fmt.Errorf("Invalid volume specification: '%s'", spec) +} diff --git a/vendor/github.com/docker/docker/volume/volume_copy.go b/vendor/github.com/docker/docker/volume/volume_copy.go new file mode 100644 index 00000000..067537fb --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_copy.go @@ -0,0 +1,28 @@ +package volume + +import "strings" + +const ( + // DefaultCopyMode is the copy mode used by default for normal/named volumes + DefaultCopyMode = true +) + +// {=isEnabled} +var copyModes = map[string]bool{ + "nocopy": false, +} + +func copyModeExists(mode string) bool { + _, exists := copyModes[mode] + return exists +} + +// GetCopyMode gets the copy mode from the mode string for mounts +func getCopyMode(mode string) (bool, bool) { + for _, o := range strings.Split(mode, ",") { + if isEnabled, exists := copyModes[o]; exists { + return isEnabled, true + } + } + return DefaultCopyMode, false +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go new file mode 100644 index 00000000..f5f28205 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go @@ -0,0 +1,44 @@ +// +build linux + +package volume + +import ( + "strings" +) + +// DefaultPropagationMode defines what propagation mode should be used by +// default if user has not specified one explicitly. +const DefaultPropagationMode string = "rprivate" + +// propagation modes +var propagationModes = map[string]bool{ + "private": true, + "rprivate": true, + "slave": true, + "rslave": true, + "shared": true, + "rshared": true, +} + +// GetPropagation extracts and returns the mount propagation mode. If there +// are no specifications, then by default it is "private". +func GetPropagation(mode string) string { + for _, o := range strings.Split(mode, ",") { + if propagationModes[o] { + return o + } + } + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if propagationModes[o] { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go new file mode 100644 index 00000000..0edc89ab --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go @@ -0,0 +1,22 @@ +// +build !linux + +package volume + +// DefaultPropagationMode is used only in linux. In other cases it returns +// empty string. +const DefaultPropagationMode string = "" + +// propagation modes not supported on this platform. +var propagationModes = map[string]bool{} + +// GetPropagation is not supported. Return empty string. +func GetPropagation(mode string) string { + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. 
Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_unix.go b/vendor/github.com/docker/docker/volume/volume_unix.go new file mode 100644 index 00000000..2520d7c1 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_unix.go @@ -0,0 +1,186 @@ +// +build linux freebsd darwin solaris + +package volume + +import ( + "fmt" + "path/filepath" + "strings" +) + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var labelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Only bind mounts and local volumes can be used in old versions of Docker. +func (m *MountPoint) BackwardsCompatible() bool { + return len(m.Source) > 0 || m.Driver == DefaultDriverName +} + +// HasResource checks whether the given absolute path for a container is in +// this mount point. If the relative path starts with `../` then the resource +// is outside of this mount point, but we can't simply check for this prefix +// because it misses `..` which is also outside of the mount, so check both. +func (m *MountPoint) HasResource(absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +// ParseMountSpec validates the configuration of mount information is valid. +func ParseMountSpec(spec, volumeDriver string) (*MountPoint, error) { + spec = filepath.ToSlash(spec) + + mp := &MountPoint{ + RW: true, + Propagation: DefaultPropagationMode, + } + if strings.Count(spec, ":") > 2 { + return nil, errInvalidSpec(spec) + } + + arr := strings.SplitN(spec, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(spec) + } + + switch len(arr) { + case 1: + // Just a destination path in the container + mp.Destination = filepath.Clean(arr[0]) + case 2: + if isValid := ValidMountMode(arr[1]); isValid { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. eg /foo:rw + return nil, errInvalidSpec(spec) + } + // Host Source Path or Name + Destination + mp.Source = arr[0] + mp.Destination = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + mp.Source = arr[0] + mp.Destination = arr[1] + mp.Mode = arr[2] // Mode field is used by SELinux to decide whether to apply label + if !ValidMountMode(mp.Mode) { + return nil, errInvalidMode(mp.Mode) + } + mp.RW = ReadWrite(mp.Mode) + mp.Propagation = GetPropagation(mp.Mode) + default: + return nil, errInvalidSpec(spec) + } + + //validate the volumes destination path + mp.Destination = filepath.Clean(mp.Destination) + if !filepath.IsAbs(mp.Destination) { + return nil, fmt.Errorf("Invalid volume destination path: '%s' mount path must be absolute.", mp.Destination) + } + + // Destination cannot be "/" + if mp.Destination == "/" { + return nil, fmt.Errorf("Invalid specification: destination can't be '/' in '%s'", spec) + } + + name, source := ParseVolumeSource(mp.Source) + if len(source) == 0 { + mp.Source = "" // Clear it out as we previously assumed it was not a name + mp.Driver = volumeDriver + // Named volumes can't have propagation properties specified. + // Their defaults will be decided by docker. This is just a + // safeguard. 
Don't want to get into situations where named + // volumes were mounted as '[r]shared' inside container and + // container does further mounts under that volume and these + // mounts become visible on host and later original volume + // cleanup becomes an issue if container does not unmount + // submounts explicitly. + if HasPropagation(mp.Mode) { + return nil, errInvalidSpec(spec) + } + } else { + mp.Source = filepath.Clean(source) + } + + copyData, isSet := getCopyMode(mp.Mode) + // do not allow copy modes on binds + if len(name) == 0 && isSet { + return nil, errInvalidMode(mp.Mode) + } + + mp.CopyData = copyData + mp.Name = name + + return mp, nil +} + +// ParseVolumeSource parses the origin sources that's mounted into the container. +// It returns a name and a source. It looks to see if the spec passed in +// is an absolute file. If it is, it assumes the spec is a source. If not, +// it assumes the spec is a name. +func ParseVolumeSource(spec string) (string, string) { + if !filepath.IsAbs(spec) { + return spec, "" + } + return "", spec +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case labelModes[o]: + labelModeCount++ + case propagationModes[o]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 { + return false + } + return true +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +// If there are no specifications w.r.t read write mode, then by default +// it returns true. +func ReadWrite(mode string) bool { + if !ValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + + return true +} diff --git a/vendor/github.com/docker/engine-api/CHANGELOG.md b/vendor/github.com/docker/engine-api/CHANGELOG.md index 33cefd6a..d7f558ff 100644 --- a/vendor/github.com/docker/engine-api/CHANGELOG.md +++ b/vendor/github.com/docker/engine-api/CHANGELOG.md @@ -2,18 +2,6 @@ Items starting with DEPRECATE are important deprecation notices. For more information on the list of deprecated APIs please have a look at https://docs.docker.com/misc/deprecated/ where target removal dates can also be found. -## 0.3.2 (2016-03-30) - -## Client - -- Revert setting the ServerName in the TLS configuration at client init. See https://github.com/docker/swarm/issues/2027. - -## 0.3.1 (2016-03-23) - -### Client - -- Ensure that API paths are properly escaped. 
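ParseMountSpec above accepts an optional third field that mixes read-write, SELinux label, copy, and propagation flags, at most one from each group. A small illustrative parse, assuming the linux build of this file:

package main

import (
	"fmt"

	"github.com/docker/docker/volume"
)

// Illustrative only: parse a bind-mount spec combining three mode groups.
func main() {
	mp, err := volume.ParseMountSpec("/src:/dst:ro,Z,rslave", "local")
	if err != nil {
		panic(err)
	}
	fmt.Println(mp.Source)      // /src
	fmt.Println(mp.Destination) // /dst
	fmt.Println(mp.RW)          // false, because of "ro"
	fmt.Println(mp.Propagation) // rslave
}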
- ## 0.3.0 (2016-03-22) ### Client diff --git a/vendor/github.com/docker/engine-api/MAINTAINERS b/vendor/github.com/docker/engine-api/MAINTAINERS index f86ffe0b..2bc4fd6d 100644 --- a/vendor/github.com/docker/engine-api/MAINTAINERS +++ b/vendor/github.com/docker/engine-api/MAINTAINERS @@ -11,29 +11,10 @@ [Org] [Org."Core maintainers"] people = [ - "aaronlehmann", "calavera", - "coolljt0725", - "cpuguy83", - "crosbymichael", - "dnephin", "dongluochen", - "duglin", - "estesp", - "icecrime", - "jhowardmsft", - "lk4d4", - "mavenugo", "mhbauer", - "runcom", - "stevvooe", - "thajeztah", - "tianon", - "tibor", - "tonistiigi", - "unclejack", - "vdemeester", - "vieux" + "vdemeester" ] [people] @@ -43,118 +24,22 @@ # in the people section. # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - [people.calavera] Name = "David Calavera" Email = "david.calavera@gmail.com" GitHub = "calavera" - [people.coolljt0725] - Name = "Lei Jitang" - Email = "leijitang@huawei.com" - GitHub = "coolljt0725" - - [people.cpuguy83] - Name = "Brian Goff" - Email = "cpuguy83@gmail.com" - Github = "cpuguy83" - - [people.crosbymichael] - Name = "Michael Crosby" - Email = "crosbymichael@gmail.com" - GitHub = "crosbymichael" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - [people.dongluochen] Name = "Dongluo Chen" Email = "dongluo.chen@docker.com" GitHub = "dongluochen" - [people.duglin] - Name = "Doug Davis" - Email = "dug@us.ibm.com" - GitHub = "duglin" - - [people.estesp] - Name = "Phil Estes" - Email = "estesp@linux.vnet.ibm.com" - GitHub = "estesp" - - [people.icecrime] - Name = "Arnaud Porterie" - Email = "arnaud@docker.com" - GitHub = "icecrime" - - [people.jhowardmsft] - Name = "John Howard" - Email = "jhoward@microsoft.com" - GitHub = "jhowardmsft" - - [people.lk4d4] - Name = "Alexander Morozov" - Email = "lk4d4@docker.com" - GitHub = "lk4d4" - - [people.mavenugo] - Name = "Madhu Venugopal" - Email = "madhu@docker.com" - GitHub = "mavenugo" - [people.mhbauer] Name = "Morgan Bauer" Email = "mbauer@us.ibm.com" GitHub = "mhbauer" - [people.runcom] - Name = "Antonio Murdaca" - Email = "runcom@redhat.com" - GitHub = "runcom" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.tianon] - Name = "Tianon Gravi" - Email = "admwiggin@gmail.com" - GitHub = "tianon" - - [people.tibor] - Name = "Tibor Vass" - Email = "tibor@docker.com" - GitHub = "tiborvass" - - [people.tonistiigi] - Name = "Tõnis Tiigi" - Email = "tonis@docker.com" - GitHub = "tonistiigi" - - [people.unclejack] - Name = "Cristian Staretu" - Email = "cristian.staretu@gmail.com" - GitHub = "unclejack" - [people.vdemeester] Name = "Vincent Demeester" Email = "vincent@sbr.pm" GitHub = "vdemeester" - - [people.vieux] - Name = "Victor Vieux" - Email = "vieux@docker.com" - GitHub = "vieux" diff --git a/vendor/github.com/docker/engine-api/Makefile b/vendor/github.com/docker/engine-api/Makefile index 51b0ca31..a4a8595a 100644 --- a/vendor/github.com/docker/engine-api/Makefile +++ b/vendor/github.com/docker/engine-api/Makefile @@ -1,4 +1,4 @@ -.PHONY: all deps test validate lint +.PHONY: all deps test validate all: deps test validate @@ -9,13 +9,7 @@ deps: test: go test -race -cover ./... -validate: lint +validate: go vet ./... + test -z "$(golint ./... 
| tee /dev/stderr)" test -z "$(gofmt -s -l . | tee /dev/stderr)" - -lint: - out="$$(golint ./...)"; \ - if [ -n "$$(golint ./...)" ]; then \ - echo "$$out"; \ - exit 1; \ - fi diff --git a/vendor/github.com/docker/engine-api/client/client.go b/vendor/github.com/docker/engine-api/client/client.go index 8c8c6fd1..0716667b 100644 --- a/vendor/github.com/docker/engine-api/client/client.go +++ b/vendor/github.com/docker/engine-api/client/client.go @@ -21,7 +21,7 @@ type Client struct { addr string // basePath holds the path to prepend to the requests. basePath string - // transport is the interface to send request with, it implements transport.Client. + // transport is the interface to sends request with, it implements transport.Client. transport transport.Client // version of the server to talk to. version string @@ -114,12 +114,6 @@ func (cli *Client) ClientVersion() string { return cli.version } -// UpdateClientVersion updates the version string associated with this -// instance of the Client. -func (cli *Client) UpdateClientVersion(v string) { - cli.version = v -} - // ParseHost verifies that the given host strings is valid. func ParseHost(host string) (string, string, string, error) { protoAddrParts := strings.SplitN(host, "://", 2) diff --git a/vendor/github.com/docker/engine-api/client/container_attach.go b/vendor/github.com/docker/engine-api/client/container_attach.go index 1b616bf0..d87fc655 100644 --- a/vendor/github.com/docker/engine-api/client/container_attach.go +++ b/vendor/github.com/docker/engine-api/client/container_attach.go @@ -11,7 +11,7 @@ import ( // It returns a types.HijackedConnection with the hijacked connection // and the a reader to get output. It's up to the called to close // the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { +func (cli *Client) ContainerAttach(ctx context.Context, options types.ContainerAttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { query.Set("stream", "1") @@ -30,5 +30,5 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option } headers := map[string][]string{"Content-Type": {"text/plain"}} - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) + return cli.postHijacked(ctx, "/containers/"+options.ContainerID+"/attach", query, nil, headers) } diff --git a/vendor/github.com/docker/engine-api/client/container_commit.go b/vendor/github.com/docker/engine-api/client/container_commit.go index d5c47499..8a6c8993 100644 --- a/vendor/github.com/docker/engine-api/client/container_commit.go +++ b/vendor/github.com/docker/engine-api/client/container_commit.go @@ -2,36 +2,18 @@ package client import ( "encoding/json" - "errors" "net/url" - distreference "github.com/docker/distribution/reference" "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" "golang.org/x/net/context" ) // ContainerCommit applies changes into a container and creates a new tagged image. 
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { - var repository, tag string - if options.Reference != "" { - distributionRef, err := distreference.ParseNamed(options.Reference) - if err != nil { - return types.ContainerCommitResponse{}, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference") - } - - tag = reference.GetTagFromNamedRef(distributionRef) - repository = distributionRef.Name() - } - +func (cli *Client) ContainerCommit(ctx context.Context, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) + query.Set("container", options.ContainerID) + query.Set("repo", options.RepositoryName) + query.Set("tag", options.Tag) query.Set("comment", options.Comment) query.Set("author", options.Author) for _, change := range options.Changes { diff --git a/vendor/github.com/docker/engine-api/client/container_copy.go b/vendor/github.com/docker/engine-api/client/container_copy.go index d3dd0b11..aaf1f775 100644 --- a/vendor/github.com/docker/engine-api/client/container_copy.go +++ b/vendor/github.com/docker/engine-api/client/container_copy.go @@ -30,17 +30,17 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri } // CopyToContainer copies content into the container filesystem. -func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { +func (cli *Client) CopyToContainer(ctx context.Context, options types.CopyToContainerOptions) error { query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + query.Set("path", filepath.ToSlash(options.Path)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. if !options.AllowOverwriteDirWithFile { query.Set("noOverwriteDirNonDir", "true") } - apiPath := fmt.Sprintf("/containers/%s/archive", container) + path := fmt.Sprintf("/containers/%s/archive", options.ContainerID) - response, err := cli.putRaw(ctx, apiPath, query, content, nil) + response, err := cli.putRaw(ctx, path, query, options.Content, nil) if err != nil { return err } @@ -53,13 +53,13 @@ func (cli *Client) CopyToContainer(ctx context.Context, container, path string, return nil } -// CopyFromContainer gets the content from the container and returns it as a Reader +// CopyFromContainer get the content from the container and return it as a Reader // to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. 
- apiPath := fmt.Sprintf("/containers/%s/archive", container) + apiPath := fmt.Sprintf("/containers/%s/archive", containerID) response, err := cli.get(ctx, apiPath, query, nil) if err != nil { return nil, types.ContainerPathStat{}, err diff --git a/vendor/github.com/docker/engine-api/client/container_exec.go b/vendor/github.com/docker/engine-api/client/container_exec.go index ff7e1a9d..159c9dfd 100644 --- a/vendor/github.com/docker/engine-api/client/container_exec.go +++ b/vendor/github.com/docker/engine-api/client/container_exec.go @@ -8,9 +8,9 @@ import ( ) // ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) { +func (cli *Client) ContainerExecCreate(ctx context.Context, config types.ExecConfig) (types.ContainerExecCreateResponse, error) { var response types.ContainerExecCreateResponse - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + resp, err := cli.post(ctx, "/containers/"+config.Container+"/exec", nil, config, nil) if err != nil { return response, err } @@ -19,7 +19,7 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co return response, err } -// ContainerExecStart starts an exec process already created in the docker host. +// ContainerExecStart starts an exec process already create in the docker host. func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/engine-api/client/container_export.go b/vendor/github.com/docker/engine-api/client/container_export.go index 52194f3d..1925113e 100644 --- a/vendor/github.com/docker/engine-api/client/container_export.go +++ b/vendor/github.com/docker/engine-api/client/container_export.go @@ -8,7 +8,7 @@ import ( ) // ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller +// and returns them as a io.ReadCloser. It's up to the caller // to close the stream. func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) diff --git a/vendor/github.com/docker/engine-api/client/container_list.go b/vendor/github.com/docker/engine-api/client/container_list.go index 87f7333d..573f41d5 100644 --- a/vendor/github.com/docker/engine-api/client/container_list.go +++ b/vendor/github.com/docker/engine-api/client/container_list.go @@ -35,8 +35,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis } if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter) - + filterJSON, err := filters.ToParam(options.Filter) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/engine-api/client/container_logs.go b/vendor/github.com/docker/engine-api/client/container_logs.go index 08b9b918..47c60ee3 100644 --- a/vendor/github.com/docker/engine-api/client/container_logs.go +++ b/vendor/github.com/docker/engine-api/client/container_logs.go @@ -13,7 +13,7 @@ import ( // ContainerLogs returns the logs generated by a container in an io.ReadCloser. // It's up to the caller to close the stream. 
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *Client) ContainerLogs(ctx context.Context, options types.ContainerLogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { query.Set("stdout", "1") @@ -35,16 +35,12 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options query.Set("timestamps", "1") } - if options.Details { - query.Set("details", "1") - } - if options.Follow { query.Set("follow", "1") } query.Set("tail", options.Tail) - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + resp, err := cli.get(ctx, "/containers/"+options.ContainerID+"/logs", query, nil) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/engine-api/client/container_remove.go b/vendor/github.com/docker/engine-api/client/container_remove.go index cef4b812..56796231 100644 --- a/vendor/github.com/docker/engine-api/client/container_remove.go +++ b/vendor/github.com/docker/engine-api/client/container_remove.go @@ -8,7 +8,7 @@ import ( ) // ContainerRemove kills and removes a container from the docker host. -func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { +func (cli *Client) ContainerRemove(ctx context.Context, options types.ContainerRemoveOptions) error { query := url.Values{} if options.RemoveVolumes { query.Set("v", "1") @@ -21,7 +21,7 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti query.Set("force", "1") } - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + resp, err := cli.delete(ctx, "/containers/"+options.ContainerID, query, nil) ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/engine-api/client/container_resize.go b/vendor/github.com/docker/engine-api/client/container_resize.go index b95d26b3..07820174 100644 --- a/vendor/github.com/docker/engine-api/client/container_resize.go +++ b/vendor/github.com/docker/engine-api/client/container_resize.go @@ -9,13 +9,13 @@ import ( ) // ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +func (cli *Client) ContainerResize(ctx context.Context, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+options.ID, options.Height, options.Width) } // ContainerExecResize changes the size of the tty for an exec process running inside a container. 
-func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +func (cli *Client) ContainerExecResize(ctx context.Context, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+options.ID, options.Height, options.Width) } func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error { diff --git a/vendor/github.com/docker/engine-api/client/container_wait.go b/vendor/github.com/docker/engine-api/client/container_wait.go index c26ff3f3..ca8c443b 100644 --- a/vendor/github.com/docker/engine-api/client/container_wait.go +++ b/vendor/github.com/docker/engine-api/client/container_wait.go @@ -8,7 +8,7 @@ import ( "github.com/docker/engine-api/types" ) -// ContainerWait pauses execution until a container exits. +// ContainerWait pauses execution util a container is exits. // It returns the API status code as response of its readiness. func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) { resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) diff --git a/vendor/github.com/docker/engine-api/client/errors.go b/vendor/github.com/docker/engine-api/client/errors.go index 17828bb7..9bcc78eb 100644 --- a/vendor/github.com/docker/engine-api/client/errors.go +++ b/vendor/github.com/docker/engine-api/client/errors.go @@ -5,7 +5,7 @@ import ( "fmt" ) -// ErrConnectionFailed is an error raised when the connection between the client and the server failed. +// ErrConnectionFailed is a error raised when the connection between the client and the server failed. var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") // imageNotFoundError implements an error returned when an image is not in the docker host. 
@@ -30,7 +30,7 @@ type containerNotFoundError struct { containerID string } -// Error returns a string representation of a containerNotFoundError +// Error returns a string representation of an containerNotFoundError func (e containerNotFoundError) Error() string { return fmt.Sprintf("Error: No such container: %s", e.containerID) } @@ -47,7 +47,7 @@ type networkNotFoundError struct { networkID string } -// Error returns a string representation of a networkNotFoundError +// Error returns a string representation of an networkNotFoundError func (e networkNotFoundError) Error() string { return fmt.Sprintf("Error: No such network: %s", e.networkID) } @@ -64,7 +64,7 @@ type volumeNotFoundError struct { volumeID string } -// Error returns a string representation of a networkNotFoundError +// Error returns a string representation of an networkNotFoundError func (e volumeNotFoundError) Error() string { return fmt.Sprintf("Error: No such volume: %s", e.volumeID) } @@ -87,7 +87,7 @@ func (u unauthorizedError) Error() string { } // IsErrUnauthorized returns true if the error is caused -// when a remote registry authentication fails +// when an the remote registry authentication fails func IsErrUnauthorized(err error) bool { _, ok := err.(unauthorizedError) return ok diff --git a/vendor/github.com/docker/engine-api/client/hijack.go b/vendor/github.com/docker/engine-api/client/hijack.go index dbd91ef6..2fd46b6c 100644 --- a/vendor/github.com/docker/engine-api/client/hijack.go +++ b/vendor/github.com/docker/engine-api/client/hijack.go @@ -68,11 +68,11 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu defer clientconn.Close() // Server hijacks the connection, error 'connection closed' expected - _, err = clientconn.Do(req) + clientconn.Do(req) rwc, br := clientconn.Hijack() - return types.HijackedResponse{Conn: rwc, Reader: br}, err + return types.HijackedResponse{Conn: rwc, Reader: br}, nil } func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { diff --git a/vendor/github.com/docker/engine-api/client/image_build.go b/vendor/github.com/docker/engine-api/client/image_build.go index 4165c4e9..1612c6a1 100644 --- a/vendor/github.com/docker/engine-api/client/image_build.go +++ b/vendor/github.com/docker/engine-api/client/image_build.go @@ -3,7 +3,6 @@ package client import ( "encoding/base64" "encoding/json" - "io" "net/http" "net/url" "regexp" @@ -21,7 +20,7 @@ var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) // ImageBuild sends request to the daemon to build images. // The Body in the response implement an io.ReadCloser and it's up to the caller to // close it. 
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { +func (cli *Client) ImageBuild(ctx context.Context, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { query, err := imageBuildOptionsToQuery(options) if err != nil { return types.ImageBuildResponse{}, err @@ -35,7 +34,7 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/tar") - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + serverResp, err := cli.postRaw(ctx, "/build", query, options.Context, headers) if err != nil { return types.ImageBuildResponse{}, err } diff --git a/vendor/github.com/docker/engine-api/client/image_create.go b/vendor/github.com/docker/engine-api/client/image_create.go index 6dfc0391..1ec1f9d0 100644 --- a/vendor/github.com/docker/engine-api/client/image_create.go +++ b/vendor/github.com/docker/engine-api/client/image_create.go @@ -7,20 +7,14 @@ import ( "golang.org/x/net/context" "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" ) // ImageCreate creates a new image based in the parent options. // It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(parentReference) - if err != nil { - return nil, err - } - +func (cli *Client) ImageCreate(ctx context.Context, options types.ImageCreateOptions) (io.ReadCloser, error) { query := url.Values{} - query.Set("fromImage", repository) - query.Set("tag", tag) + query.Set("fromImage", options.Parent) + query.Set("tag", options.Tag) resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) if err != nil { return nil, err diff --git a/vendor/github.com/docker/engine-api/client/image_import.go b/vendor/github.com/docker/engine-api/client/image_import.go index 4e8749a0..48e2c951 100644 --- a/vendor/github.com/docker/engine-api/client/image_import.go +++ b/vendor/github.com/docker/engine-api/client/image_import.go @@ -6,30 +6,22 @@ import ( "golang.org/x/net/context" - "github.com/docker/distribution/reference" "github.com/docker/engine-api/types" ) // ImageImport creates a new image based in the source options. // It returns the JSON content in the response body. 
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - //Check if the given image name can be resolved - if _, err := reference.ParseNamed(ref); err != nil { - return nil, err - } - } - +func (cli *Client) ImageImport(ctx context.Context, options types.ImageImportOptions) (io.ReadCloser, error) { query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) + query.Set("fromSrc", options.SourceName) + query.Set("repo", options.RepositoryName) query.Set("tag", options.Tag) query.Set("message", options.Message) for _, change := range options.Changes { query.Add("changes", change) } - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + resp, err := cli.postRaw(ctx, "/images/create", query, options.Source, nil) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/engine-api/client/image_inspect.go b/vendor/github.com/docker/engine-api/client/image_inspect.go index 859ba640..761a994c 100644 --- a/vendor/github.com/docker/engine-api/client/image_inspect.go +++ b/vendor/github.com/docker/engine-api/client/image_inspect.go @@ -11,7 +11,7 @@ import ( "golang.org/x/net/context" ) -// ImageInspectWithRaw returns the image information and its raw representation. +// ImageInspectWithRaw returns the image information and it's raw representation. func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) { query := url.Values{} if getSize { diff --git a/vendor/github.com/docker/engine-api/client/image_load.go b/vendor/github.com/docker/engine-api/client/image_load.go index 72f55fdc..84ee19c3 100644 --- a/vendor/github.com/docker/engine-api/client/image_load.go +++ b/vendor/github.com/docker/engine-api/client/image_load.go @@ -10,8 +10,8 @@ import ( ) // ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. +// It's up to the caller to close the io.ReadCloser returned by +// this function. func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { v := url.Values{} v.Set("quiet", "0") diff --git a/vendor/github.com/docker/engine-api/client/image_pull.go b/vendor/github.com/docker/engine-api/client/image_pull.go index 9287604a..09044376 100644 --- a/vendor/github.com/docker/engine-api/client/image_pull.go +++ b/vendor/github.com/docker/engine-api/client/image_pull.go @@ -8,32 +8,22 @@ import ( "golang.org/x/net/context" "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" ) -// ImagePull requests the docker host to pull an image from a remote registry. +// ImagePull request the docker host to pull an image from a remote registry. // It executes the privileged function if the operation is unauthorized // and it tries one more time. // It's up to the caller to handle the io.ReadCloser and close it properly. 
-// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(ref) - if err != nil { - return nil, err - } - +func (cli *Client) ImagePull(ctx context.Context, options types.ImagePullOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) { query := url.Values{} - query.Set("fromImage", repository) - if tag != "" && !options.All { - query.Set("tag", tag) + query.Set("fromImage", options.ImageID) + if options.Tag != "" { + query.Set("tag", options.Tag) } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := privilegeFunc() if privilegeErr != nil { return nil, privilegeErr } diff --git a/vendor/github.com/docker/engine-api/client/image_push.go b/vendor/github.com/docker/engine-api/client/image_push.go index 9c837a76..ca2cb43b 100644 --- a/vendor/github.com/docker/engine-api/client/image_push.go +++ b/vendor/github.com/docker/engine-api/client/image_push.go @@ -1,46 +1,30 @@ package client import ( - "errors" "io" "net/http" "net/url" "golang.org/x/net/context" - distreference "github.com/docker/distribution/reference" "github.com/docker/engine-api/types" ) -// ImagePush requests the docker host to push an image to a remote registry. +// ImagePush request the docker host to push an image to a remote registry. // It executes the privileged function if the operation is unauthorized // and it tries one more time. // It's up to the caller to handle the io.ReadCloser and close it properly. 
-func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return nil, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - var tag = "" - if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { - tag = nameTaggedRef.Tag() - } - +func (cli *Client) ImagePush(ctx context.Context, options types.ImagePushOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) { query := url.Values{} - query.Set("tag", tag) + query.Set("tag", options.Tag) - resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) + resp, err := cli.tryImagePush(ctx, options.ImageID, query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := privilegeFunc() if privilegeErr != nil { return nil, privilegeErr } - resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) + resp, err = cli.tryImagePush(ctx, options.ImageID, query, newAuthHeader) } if err != nil { return nil, err diff --git a/vendor/github.com/docker/engine-api/client/image_remove.go b/vendor/github.com/docker/engine-api/client/image_remove.go index 47224326..d7e71c89 100644 --- a/vendor/github.com/docker/engine-api/client/image_remove.go +++ b/vendor/github.com/docker/engine-api/client/image_remove.go @@ -9,7 +9,7 @@ import ( ) // ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { +func (cli *Client) ImageRemove(ctx context.Context, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { query := url.Values{} if options.Force { @@ -19,7 +19,7 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type query.Set("noprune", "1") } - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + resp, err := cli.delete(ctx, "/images/"+options.ImageID, query, nil) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/engine-api/client/image_save.go b/vendor/github.com/docker/engine-api/client/image_save.go index ecac880a..c0feb3ed 100644 --- a/vendor/github.com/docker/engine-api/client/image_save.go +++ b/vendor/github.com/docker/engine-api/client/image_save.go @@ -7,7 +7,7 @@ import ( "golang.org/x/net/context" ) -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. // It's up to the caller to store the images and close the stream. func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { query := url.Values{ diff --git a/vendor/github.com/docker/engine-api/client/image_search.go b/vendor/github.com/docker/engine-api/client/image_search.go index 571ba3df..ebe9dc0b 100644 --- a/vendor/github.com/docker/engine-api/client/image_search.go +++ b/vendor/github.com/docker/engine-api/client/image_search.go @@ -6,29 +6,20 @@ import ( "net/url" "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" "github.com/docker/engine-api/types/registry" "golang.org/x/net/context" ) // ImageSearch makes the docker host to search by a term in a remote registry. 
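ImagePush mirrors ImagePull, and is where a non-nil RequestPrivilegeFunc earns its keep: it is invoked once if the first attempt is unauthorized, and the push is retried with the returned auth header. A sketch with placeholder registry and image names:

    func pushImage(cli client.APIClient, encodedAuth string) (io.ReadCloser, error) {
        reauth := func() (string, error) {
            // A real client would re-prompt for credentials and return a
            // fresh base64-encoded auth header; this sketch reuses the old one.
            return encodedAuth, nil
        }
        return cli.ImagePush(context.Background(), types.ImagePushOptions{
            ImageID:      "registry.example.com/team/app",
            Tag:          "v1",
            RegistryAuth: encodedAuth,
        }, reauth)
    }

Since ImagePushOptions is declared as an alias of ImagePullOptions, the same ImageID/Tag/RegistryAuth fields apply.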
// The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { +func (cli *Client) ImageSearch(ctx context.Context, options types.ImageSearchOptions, privilegeFunc RequestPrivilegeFunc) ([]registry.SearchResult, error) { var results []registry.SearchResult query := url.Values{} - query.Set("term", term) - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } + query.Set("term", options.Term) resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := privilegeFunc() if privilegeErr != nil { return results, privilegeErr } diff --git a/vendor/github.com/docker/engine-api/client/image_tag.go b/vendor/github.com/docker/engine-api/client/image_tag.go index 490de4e5..20feda38 100644 --- a/vendor/github.com/docker/engine-api/client/image_tag.go +++ b/vendor/github.com/docker/engine-api/client/image_tag.go @@ -1,38 +1,22 @@ package client import ( - "errors" - "fmt" "net/url" - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" + "golang.org/x/net/context" ) // ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, imageID, ref string, options types.ImageTagOptions) error { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - tag := reference.GetTagFromNamedRef(distributionRef) - +func (cli *Client) ImageTag(ctx context.Context, options types.ImageTagOptions) error { query := url.Values{} - query.Set("repo", distributionRef.Name()) - query.Set("tag", tag) + query.Set("repo", options.RepositoryName) + query.Set("tag", options.Tag) if options.Force { query.Set("force", "1") } - resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + resp, err := cli.post(ctx, "/images/"+options.ImageID+"/tag", query, nil, nil) ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/engine-api/client/interface.go b/vendor/github.com/docker/engine-api/client/interface.go index 2c6872f5..e95ed555 100644 --- a/vendor/github.com/docker/engine-api/client/interface.go +++ b/vendor/github.com/docker/engine-api/client/interface.go @@ -15,60 +15,59 @@ import ( // APIClient is an interface that clients that talk with a docker server must implement. 
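ImageTag likewise drops the client-side reference parsing and the digest check, so RepositoryName and Tag are sent to the daemon verbatim and any validation now rests with the server. A sketch with placeholder names:

    func tagImage(cli client.APIClient) error {
        return cli.ImageTag(context.Background(), types.ImageTagOptions{
            ImageID:        "busybox:latest",
            RepositoryName: "example/busybox",
            Tag:            "testing",
            Force:          false, // mirrors `docker tag -f`
        })
    }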
type APIClient interface { ClientVersion() string - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) + ContainerAttach(ctx context.Context, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) + ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) + ContainerExecCreate(ctx context.Context, config types.ExecConfig) (types.ContainerExecCreateResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecResize(ctx context.Context, options types.ResizeOptions) error ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error + ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, containerID, signal string) error ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout int) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error) - ContainerStart(ctx context.Context, container string) error - ContainerStop(ctx context.Context, container string, timeout int) error - ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) 
error - ContainerWait(ctx context.Context, container string) (int, error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainerLogs(ctx context.Context, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, containerID string) error + ContainerRemove(ctx context.Context, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, containerID, newContainerName string) error + ContainerResize(ctx context.Context, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, containerID string, timeout int) error + ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) + ContainerStart(ctx context.Context, containerID string) error + ContainerStop(ctx context.Context, containerID string, timeout int) error + ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) + ContainerUnpause(ctx context.Context, containerID string) error + ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error + ContainerWait(ctx context.Context, containerID string) (int, error) + CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, options types.CopyToContainerOptions) error Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error) + ImageBuild(ctx context.Context, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + ImageCreate(ctx context.Context, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) + ImageImport(ctx context.Context, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string, options 
types.ImageTagOptions) error + ImagePull(ctx context.Context, options types.ImagePullOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) + ImagePush(ctx context.Context, options types.ImagePushOptions, privilegeFunc RequestPrivilegeFunc) (io.ReadCloser, error) + ImageRemove(ctx context.Context, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageSearch(ctx context.Context, options types.ImageSearchOptions, privilegeFunc RequestPrivilegeFunc) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, options types.ImageTagOptions) error Info(ctx context.Context) (types.Info, error) - NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) NetworkRemove(ctx context.Context, networkID string) error RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) diff --git a/vendor/github.com/docker/engine-api/client/network_create.go b/vendor/github.com/docker/engine-api/client/network_create.go index c9c0b9fd..2c41ad7e 100644 --- a/vendor/github.com/docker/engine-api/client/network_create.go +++ b/vendor/github.com/docker/engine-api/client/network_create.go @@ -8,13 +8,9 @@ import ( ) // NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } +func (cli *Client) NetworkCreate(ctx context.Context, options types.NetworkCreate) (types.NetworkCreateResponse, error) { var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + serverResp, err := cli.post(ctx, "/networks/create", nil, options, nil) if err != nil { return response, err } diff --git a/vendor/github.com/docker/engine-api/client/privileged.go b/vendor/github.com/docker/engine-api/client/privileged.go new file mode 100644 index 00000000..945f18ce --- /dev/null +++ b/vendor/github.com/docker/engine-api/client/privileged.go @@ -0,0 +1,9 @@ +package client + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. 
+type RequestPrivilegeFunc func() (string, error) diff --git a/vendor/github.com/docker/engine-api/client/request.go b/vendor/github.com/docker/engine-api/client/request.go index cdbb0975..4ad2d24b 100644 --- a/vendor/github.com/docker/engine-api/client/request.go +++ b/vendor/github.com/docker/engine-api/client/request.go @@ -85,11 +85,6 @@ func (cli *Client) sendClientRequest(ctx context.Context, method, path string, q } req, err := cli.newRequest(method, path, query, body, headers) - if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. (See #189) - req.Host = "docker" - } req.URL.Host = cli.addr req.URL.Scheme = cli.transport.Scheme() diff --git a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go index 931ae10a..458a9c96 100644 --- a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go +++ b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go @@ -2,7 +2,7 @@ package blkiodev import "fmt" -// WeightDevice is a structure that holds device:weight pair +// WeightDevice is a structure that holds a device:weight pair type WeightDevice struct { Path string Weight uint16 @@ -12,7 +12,7 @@ func (w *WeightDevice) String() string { return fmt.Sprintf("%s:%d", w.Path, w.Weight) } -// ThrottleDevice is a structure that holds device:rate_per_second pair +// ThrottleDevice is a structure that holds a device:rate_per_second pair type ThrottleDevice struct { Path string Rate uint64 diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go index fa3b2cfb..f09ad02e 100644 --- a/vendor/github.com/docker/engine-api/types/client.go +++ b/vendor/github.com/docker/engine-api/types/client.go @@ -12,21 +12,24 @@ import ( // ContainerAttachOptions holds parameters to attach to a container. type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string + ContainerID string + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string } // ContainerCommitOptions holds parameters to commit changes into a container. type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config + ContainerID string + RepositoryName string + Tag string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config } // ContainerExecInspect holds information returned by exec inspect. @@ -51,17 +54,18 @@ type ContainerListOptions struct { // ContainerLogsOptions holds parameters to filter logs with. type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Timestamps bool - Follow bool - Tail string - Details bool + ContainerID string + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string } // ContainerRemoveOptions holds parameters to remove containers. 
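These option structs now carry the container identity themselves. For example, streaming logs under the new shape (this mirrors the libcompose Log() call site further down; the container name is a placeholder):

    func tailLogs(cli client.APIClient) (io.ReadCloser, error) {
        return cli.ContainerLogs(context.Background(), types.ContainerLogsOptions{
            ContainerID: "mycontainer",
            ShowStdout:  true,
            ShowStderr:  true,
            Follow:      true,
            Tail:        "all",
        })
    }

Note the new ContainerLogsOptions also drops the Details field, so callers that relied on it need another path.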
type ContainerRemoveOptions struct { + ContainerID string RemoveVolumes bool RemoveLinks bool Force bool @@ -70,6 +74,9 @@ type ContainerRemoveOptions struct { // CopyToContainerOptions holds information // about files to copy into a container type CopyToContainerOptions struct { + ContainerID string + Path string + Content io.Reader AllowOverwriteDirWithFile bool } @@ -96,7 +103,7 @@ func (h *HijackedResponse) Close() { h.Conn.Close() } -// CloseWriter is an interface that implements structs +// CloseWriter is an interface that implements structs // that close input streams to prevent from writing. type CloseWriter interface { CloseWrite() error } @@ -148,20 +155,19 @@ type ImageBuildResponse struct { // ImageCreateOptions holds information to create images. type ImageCreateOptions struct { + Parent string // Parent is the name of the image to pull + Tag string // Tag is the name to tag this image with RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry } -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) - SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) -} - // ImageImportOptions holds information to import images from the client host. type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image + Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) + SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) + RepositoryName string // RepositoryName is the name of the repository to import this image into + Message string // Message is the message to tag the image with + Tag string // Tag is the name to tag this image with + Changes []string // Changes are the raw changes to apply to this image } // ImageListOptions holds parameters to filter the list of images with. @@ -173,51 +179,46 @@ type ImageListOptions struct { // ImageLoadResponse returns information to the client about a load process. type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak Body io.ReadCloser JSON bool } // ImagePullOptions holds information to pull images. type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc + ImageID string // ImageID is the name of the image to pull + Tag string // Tag is the name of the tag to be pulled + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry } -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - //ImagePushOptions holds information to push images. type ImagePushOptions ImagePullOptions // ImageRemoveOptions holds parameters to remove images. type ImageRemoveOptions struct { + ImageID string Force bool PruneChildren bool } // ImageSearchOptions holds parameters to search images with. 
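A sketch of image removal with the ID folded into the options; the image name is a placeholder, and the returned []types.ImageDelete entries report what was untagged or deleted:

    func removeOldImage(cli client.APIClient) error {
        deleted, err := cli.ImageRemove(context.Background(), types.ImageRemoveOptions{
            ImageID:       "example/app:old",
            Force:         true,
            PruneChildren: true,
        })
        if err != nil {
            return err
        }
        for _, d := range deleted {
            fmt.Printf("%+v\n", d) // one entry per untagged/deleted layer
        }
        return nil
    }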
type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args + Term string + RegistryAuth string } // ImageTagOptions holds parameters to tag an image type ImageTagOptions struct { - Force bool + ImageID string + RepositoryName string + Tag string + Force bool } // ResizeOptions holds parameters to resize a tty. // It can be used to resize container ttys and // exec process ttys too. type ResizeOptions struct { + ID string Height int Width int } @@ -228,7 +229,7 @@ type VersionResponse struct { Server *Version } -// ServerOK returns true when the client could connect to the docker server +// ServerOK returns true when the client could connect to the docker server // and parse the information received. It returns false otherwise. func (v VersionResponse) ServerOK() bool { return v.Server != nil } diff --git a/vendor/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/engine-api/types/configs.go index 7d4fcb34..6874a037 100644 --- a/vendor/github.com/docker/engine-api/types/configs.go +++ b/vendor/github.com/docker/engine-api/types/configs.go @@ -38,12 +38,13 @@ type ContainerCommitConfig struct { Config *container.Config } -// ExecConfig is a small subset of the Config struct that holds the configuration +// ExecConfig is a small subset of the Config struct that holds the configuration // for the exec feature of docker. type ExecConfig struct { User string // User that will run the command Privileged bool // Is the container in privileged mode Tty bool // Attach standard streams to a tty. + Container string // Name of the container (to execute in) AttachStdin bool // Attach the standard input, makes possible user interaction AttachStderr bool // Attach the standard output AttachStdout bool // Attach the standard error diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go index 1dfc4083..b8747a50 100644 --- a/vendor/github.com/docker/engine-api/types/container/config.go +++ b/vendor/github.com/docker/engine-api/types/container/config.go @@ -19,6 +19,7 @@ type Config struct { AttachStdout bool // Attach the standard output AttachStderr bool // Attach the standard error ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports + PublishService string `json:",omitempty"` // Name of the network service exposed by the container Tty bool // Attach standard streams to a tty, including stdin if it is not closed. OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go index 39f6a225..a1b503f8 100644 --- a/vendor/github.com/docker/engine-api/types/container/host_config.go +++ b/vendor/github.com/docker/engine-api/types/container/host_config.go @@ -25,7 +25,7 @@ func (i Isolation) IsDefault() bool { // IpcMode represents the container ipc stack. type IpcMode string -// IsPrivate indicates whether the container uses its private ipc stack. +// IsPrivate indicates whether the container uses its private ipc stack. func (n IpcMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } @@ -89,16 +89,14 @@ func (n UsernsMode) Valid() bool { return true } -// CgroupSpec represents the cgroup to use for the container. +// CgroupSpec represents the cgroup to use for the container. 
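Exec creation follows suit: the target container becomes the Container field of ExecConfig rather than a separate argument. A sketch that creates and starts an exec; Cmd is a standard ExecConfig field upstream even though this hunk only shows the fields that changed, and the container name is a placeholder:

    func runShell(cli client.APIClient) error {
        resp, err := cli.ContainerExecCreate(context.Background(), types.ExecConfig{
            Container:    "mycontainer",
            Cmd:          []string{"sh", "-c", "echo hello"},
            AttachStdout: true,
            AttachStderr: true,
        })
        if err != nil {
            return err
        }
        return cli.ContainerExecStart(context.Background(), resp.ID, types.ExecStartCheck{})
    }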
type CgroupSpec string -// IsContainer indicates whether the container is using another container cgroup func (c CgroupSpec) IsContainer() bool { parts := strings.SplitN(string(c), ":", 2) return len(parts) > 1 && parts[0] == "container" } -// Valid indicates whether the cgroup spec is valid. func (c CgroupSpec) Valid() bool { return c.IsContainer() || c == "" } @@ -115,7 +113,7 @@ func (c CgroupSpec) Container() string { // UTSMode represents the UTS namespace of the container. type UTSMode string -// IsPrivate indicates whether the container uses its private UTS namespace. +// IsPrivate indicates whether the container uses its private UTS namespace. func (n UTSMode) IsPrivate() bool { return !(n.IsHost()) } @@ -139,7 +137,7 @@ func (n UTSMode) Valid() bool { // PidMode represents the pid stack of the container. type PidMode string -// IsPrivate indicates whether the container uses its private pid stack. +// IsPrivate indicates whether the container uses its private pid stack. func (n PidMode) IsPrivate() bool { return !(n.IsHost()) } @@ -186,7 +184,7 @@ func (rp *RestartPolicy) IsAlways() bool { } // IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. +// This means the container will automatically restart after exiting with a non-zero exit status. func (rp *RestartPolicy) IsOnFailure() bool { return rp.Name == "on-failure" } @@ -238,11 +236,11 @@ type Resources struct { Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive - NetworkMaximumBandwidth uint64 // Maximum bandwidth of the network endpoint in bytes per second + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + BlkioIOps uint64 // Maximum IOps for the container system drive + BlkioBps uint64 // Maximum Bytes per second for the container system drive + SandboxSize uint64 // System drive will be expanded to at least this size (in bytes) } // UpdateConfig holds the mutable attributes of a Container. diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go index 0ee332ba..5726a77e 100644 --- a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go @@ -32,7 +32,7 @@ func (n NetworkMode) IsHost() bool { return false } -// IsPrivate indicates whether container uses its private network stack. +// IsPrivate indicates whether container uses its private network stack. 
func (n NetworkMode) IsPrivate() bool { return !(n.IsHost() || n.IsContainer()) } diff --git a/vendor/github.com/docker/engine-api/types/events/events.go b/vendor/github.com/docker/engine-api/types/events/events.go new file mode 100644 index 00000000..c5987aaf --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/events/events.go @@ -0,0 +1,38 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set of attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. + Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go index 0e0d7e38..9c80b1ed 100644 --- a/vendor/github.com/docker/engine-api/types/filters/parse.go +++ b/vendor/github.com/docker/engine-api/types/filters/parse.go @@ -8,14 +8,12 @@ import ( "fmt" "regexp" "strings" - - "github.com/docker/engine-api/types/versions" ) // Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values. // e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' // the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} type Args struct { @@ -56,7 +54,7 @@ func ParseFlag(arg string, prev Args) (Args, error) { // ErrBadFormat is an error returned in case of bad format for a filter. var ErrBadFormat = errors.New("bad format of filter (expected name=value)") -// ToParam packs the Args into a string for easy transport from client to server. +// ToParam packs the Args into a string for easy transport from client to server. func ToParam(a Args) (string, error) { // this way we don't URL encode {}, just empty space if a.Len() == 0 { return "", nil } @@ -70,28 +68,6 @@ func ToParam(a Args) (string, error) { return string(buf), nil } -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version). 
-func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - // for daemons older than v1.10, filter must be of the form map[string][]string - buf := []byte{} - err := errors.New("") - if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) - } - if err != nil { - return "", err - } - return string(buf), nil -} - // FromParam unpacks the filter Args. func FromParam(p string) (Args, error) { if len(p) == 0 { @@ -214,7 +190,7 @@ func (filters Args) ExactMatch(field, source string) bool { return true } - // try to match full name value to avoid O(N) regular expression matching + // try to match full name value to avoid O(N) regular expression matching if fieldValues[source] { return true } @@ -279,17 +255,3 @@ func deprecatedArgs(d map[string][]string) map[string]map[string]bool { } return m } - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/engine-api/types/reference/image_reference.go b/vendor/github.com/docker/engine-api/types/reference/image_reference.go deleted file mode 100644 index be9cf8eb..00000000 --- a/vendor/github.com/docker/engine-api/types/reference/image_reference.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import ( - distreference "github.com/docker/distribution/reference" -) - -// Parse parses the given references and returns the repository and -// tag (if present) from it. If there is an error during parsing, it will -// return an error. -func Parse(ref string) (string, string, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return "", "", err - } - - tag := GetTagFromNamedRef(distributionRef) - return distributionRef.Name(), tag, nil -} - -// GetTagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api makes the distinction between repository -// and tags. -func GetTagFromNamedRef(ref distreference.Named) string { - var tag string - switch x := ref.(type) { - case distreference.Digested: - tag = x.Digest().String() - case distreference.NamedTagged: - tag = x.Tag() - default: - tag = "latest" - } - return tag -} diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go index d2aca6f0..4fcf986e 100644 --- a/vendor/github.com/docker/engine-api/types/registry/registry.go +++ b/vendor/github.com/docker/engine-api/types/registry/registry.go @@ -78,10 +78,12 @@ type IndexInfo struct { type SearchResult struct { // StarCount indicates the number of stars this repository has StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. 
+ // IsOfficial indicates whether the result is an official repository or not IsOfficial bool `json:"is_official"` // Name is the name of the repository Name string `json:"name"` + // IsTrusted indicates whether the result is trusted + IsTrusted bool `json:"is_trusted"` + // IsAutomated indicates whether the result is automated IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository diff --git a/vendor/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go index b420ebe7..55081ae4 100644 --- a/vendor/github.com/docker/engine-api/types/stats.go +++ b/vendor/github.com/docker/engine-api/types/stats.go @@ -8,7 +8,7 @@ import "time" type ThrottlingData struct { // Number of periods with throttling active Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. + // Number of periods when the container hits its throttling limit. ThrottledPeriods uint64 `json:"throttled_periods"` // Aggregate time the container was throttled for in nanoseconds. ThrottledTime uint64 `json:"throttled_time"` @@ -91,9 +91,6 @@ type NetworkStats struct { type PidsStats struct { // Current is the number of pids in the cgroup Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` } // Stats is Ultimate struct aggregating all types of stats of one container diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go index cb2dc9ac..18cfbdda 100644 --- a/vendor/github.com/docker/engine-api/types/types.go +++ b/vendor/github.com/docker/engine-api/types/types.go @@ -251,7 +251,6 @@ type Info struct { ServerVersion string ClusterStore string ClusterAdvertise string - SecurityOptions []string } // PluginsInfo is a temp struct holding Plugins name @@ -290,18 +289,6 @@ type ContainerState struct { FinishedAt string } -// ContainerNode stores information about the node that a container -// is running on. It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int - Labels map[string]string -} - // ContainerJSONBase contains response of Remote API: // GET "/containers/{name:.*}/json" type ContainerJSONBase struct { @@ -315,7 +302,6 @@ type ContainerJSONBase struct { HostnamePath string HostsPath string LogPath string - Node *ContainerNode `json:",omitempty"` Name string RestartCount int Driver string @@ -395,7 +381,6 @@ type Volume struct { Mountpoint string // Mountpoint is the location on disk of the volume Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume Labels map[string]string // Labels is metadata specific to the volume - Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level) } // VolumesListResponse contains the response for the remote API: @@ -439,6 +424,7 @@ type EndpointResource struct { // NetworkCreate is the expected body of the "create network" http request message type NetworkCreate struct { + Name string CheckDuplicate bool Driver string EnableIPv6 bool @@ -448,12 +434,6 @@ type NetworkCreate struct { Labels map[string]string } -// NetworkCreateRequest is the request message sent to the server for network create call. 
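With Name folded into types.NetworkCreate, the wrapper request struct removed just below becomes unnecessary. A sketch of creating a network under the flattened shape; the network name and driver are placeholders:

    func createNet(cli client.APIClient) error {
        resp, err := cli.NetworkCreate(context.Background(), types.NetworkCreate{
            Name:           "app-net",
            CheckDuplicate: true, // ask the daemon to reject an existing name
            Driver:         "bridge",
        })
        if err != nil {
            return err
        }
        fmt.Println("created network", resp.ID)
        return nil
    }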
-type NetworkCreateRequest struct { - NetworkCreate - Name string -} - // NetworkCreateResponse is the response message sent by the server for network create call type NetworkCreateResponse struct { ID string `json:"Id"` diff --git a/vendor/github.com/docker/engine-api/types/versions/compare.go b/vendor/github.com/docker/engine-api/types/versions/compare.go deleted file mode 100644 index 611d4fed..00000000 --- a/vendor/github.com/docker/engine-api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go b/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go new file mode 100644 index 00000000..4ed43358 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/versions/v1p20" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon. +type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go b/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go new file mode 100644 index 00000000..ed800061 --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20. 
+package v1p20 + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for API prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/github.com/docker/libcompose/docker/auth.go b/vendor/github.com/docker/libcompose/docker/auth.go index aa114635..f5023636 100644 --- a/vendor/github.com/docker/libcompose/docker/auth.go +++ b/vendor/github.com/docker/libcompose/docker/auth.go @@ -16,6 +16,13 @@ type ConfigAuthLookup struct { context *Context } +// NewConfigAuthLookup creates a new ConfigAuthLookup for a given context +func NewConfigAuthLookup(context *Context) *ConfigAuthLookup { + return &ConfigAuthLookup{ + context: context, + } +} + // Lookup uses a Docker config file to lookup authentication information func (c *ConfigAuthLookup) Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig { if c.context.ConfigFile == nil || repoInfo == nil || repoInfo.Index == nil { diff --git a/vendor/github.com/docker/libcompose/docker/builder/builder.go b/vendor/github.com/docker/libcompose/docker/builder/builder.go index f2120bbb..6b88a36b 100644 --- a/vendor/github.com/docker/libcompose/docker/builder/builder.go +++ b/vendor/github.com/docker/libcompose/docker/builder/builder.go @@ -64,7 +64,8 @@ func (d *DaemonBuilder) Build(ctx context.Context, imageName string) error { outFd, isTerminalOut := term.GetFdInfo(os.Stdout) - response, err := d.Client.ImageBuild(ctx, body, types.ImageBuildOptions{ + response, err := d.Client.ImageBuild(ctx, types.ImageBuildOptions{ + Context: body, Tags: []string{imageName}, NoCache: d.NoCache, Remove: true, diff --git a/vendor/github.com/docker/libcompose/docker/container.go b/vendor/github.com/docker/libcompose/docker/container.go index 3635da0e..d0928460 100644 --- a/vendor/github.com/docker/libcompose/docker/container.go +++ b/vendor/github.com/docker/libcompose/docker/container.go @@ -152,7 +152,8 @@ func (c *Container) Recreate(ctx context.Context, imageName string) (*types.Cont } logrus.Debugf("Created replacement container %s", newContainer.ID) - if err := c.client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ + if err := c.client.ContainerRemove(ctx, types.ContainerRemoveOptions{ + ContainerID: container.ID, Force: true, RemoveVolumes: false, }); err != nil { @@ -241,7 +242,8 @@ func (c *Container) Delete(ctx context.Context, removeVolume bool) error { } if !info.State.Running { - return c.client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ + return c.client.ContainerRemove(ctx, types.ContainerRemoveOptions{ + ContainerID: container.ID, Force: true, RemoveVolumes: removeVolume, }) @@ -291,13 +293,14 @@ func (c *Container) 
Run(ctx context.Context, imageName string, configOverride *c } options := types.ContainerAttachOptions{ - Stream: true, - Stdin: configOverride.StdinOpen, - Stdout: configOverride.Tty, - Stderr: configOverride.Tty, + ContainerID: container.ID, + Stream: true, + Stdin: configOverride.StdinOpen, + Stdout: configOverride.Tty, + Stderr: configOverride.Tty, } - resp, err := c.client.ContainerAttach(ctx, container.ID, options) + resp, err := c.client.ContainerAttach(ctx, options) if err != nil { return -1, err } @@ -632,12 +635,13 @@ func (c *Container) Log(ctx context.Context, follow bool) error { l := c.loggerFactory.Create(name) options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Follow: follow, - Tail: "all", + ContainerID: c.name, + ShowStdout: true, + ShowStderr: true, + Follow: follow, + Tail: "all", } - responseBody, err := c.client.ContainerLogs(ctx, c.name, options) + responseBody, err := c.client.ContainerLogs(ctx, options) if err != nil { return err } diff --git a/vendor/github.com/docker/libcompose/docker/context.go b/vendor/github.com/docker/libcompose/docker/context.go index 1c602d87..5a35e677 100644 --- a/vendor/github.com/docker/libcompose/docker/context.go +++ b/vendor/github.com/docker/libcompose/docker/context.go @@ -2,7 +2,6 @@ package docker import ( "github.com/docker/docker/cliconfig" - "github.com/docker/docker/cliconfig/configfile" "github.com/docker/libcompose/project" ) @@ -12,7 +11,7 @@ type Context struct { project.Context ClientFactory project.ClientFactory ConfigDir string - ConfigFile *configfile.ConfigFile + ConfigFile *cliconfig.ConfigFile AuthLookup AuthLookup } diff --git a/vendor/github.com/docker/libcompose/docker/image.go b/vendor/github.com/docker/libcompose/docker/image.go index 95e3331a..273fc646 100644 --- a/vendor/github.com/docker/libcompose/docker/image.go +++ b/vendor/github.com/docker/libcompose/docker/image.go @@ -19,7 +19,9 @@ import ( ) func removeImage(ctx context.Context, client client.APIClient, image string) error { - _, err := client.ImageRemove(ctx, image, types.ImageRemoveOptions{}) + _, err := client.ImageRemove(ctx, types.ImageRemoveOptions{ + ImageID: image, + }) return err } @@ -43,9 +45,18 @@ func pullImage(ctx context.Context, client client.APIClient, service *Service, i } options := types.ImagePullOptions{ + ImageID: distributionRef.String(), + Tag: "latest", RegistryAuth: encodedAuth, } - responseBody, err := client.ImagePull(ctx, distributionRef.String(), options) + if named, ok := distributionRef.(reference.Named); ok { + options.ImageID = named.FullName() + } + if tagged, ok := distributionRef.(reference.NamedTagged); ok { + options.Tag = tagged.Tag() + } + + responseBody, err := client.ImagePull(ctx, options, nil) if err != nil { logrus.Errorf("Failed to pull image %s: %v", image, err) return err diff --git a/vendor/github.com/docker/libcompose/docker/project.go b/vendor/github.com/docker/libcompose/docker/project.go index 8fdd2bcf..79766999 100644 --- a/vendor/github.com/docker/libcompose/docker/project.go +++ b/vendor/github.com/docker/libcompose/docker/project.go @@ -36,7 +36,7 @@ func NewProject(context *Context, parseOptions *config.ParseOptions) (*project.P } if context.AuthLookup == nil { - context.AuthLookup = &ConfigAuthLookup{context} + context.AuthLookup = NewConfigAuthLookup(context) } if context.ServiceFactory == nil { diff --git a/vendor/github.com/docker/libcompose/project/project.go b/vendor/github.com/docker/libcompose/project/project.go index 6ce7edec..033a690f 100644 --- 
a/vendor/github.com/docker/libcompose/project/project.go +++ b/vendor/github.com/docker/libcompose/project/project.go @@ -278,8 +278,9 @@ func (p *Project) removeOrphanContainers() error { if err := client.ContainerKill(context.Background(), container.ID, "SIGKILL"); err != nil { return err } - if err := client.ContainerRemove(context.Background(), container.ID, types.ContainerRemoveOptions{ - Force: true, + if err := client.ContainerRemove(context.Background(), types.ContainerRemoveOptions{ + ContainerID: container.ID, + Force: true, }); err != nil { return err } diff --git a/vendor/github.com/docker/libcontainer/.gitignore b/vendor/github.com/docker/libcontainer/.gitignore deleted file mode 100644 index 2e3f79b4..00000000 --- a/vendor/github.com/docker/libcontainer/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -bundles -nsinit/nsinit -vendor/pkg diff --git a/vendor/github.com/docker/libcontainer/CONTRIBUTING.md b/vendor/github.com/docker/libcontainer/CONTRIBUTING.md deleted file mode 100644 index 667cc5a6..00000000 --- a/vendor/github.com/docker/libcontainer/CONTRIBUTING.md +++ /dev/null @@ -1,257 +0,0 @@ -# The libcontainer Contributors' Guide - -Want to hack on libcontainer? Awesome! Here are instructions to get you -started. They are probably not perfect, please let us know if anything -feels wrong or incomplete. - -## Reporting Issues - -When reporting [issues](https://github.com/docker/libcontainer/issues) -on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc), -the output of `uname -a`. Please include the steps required to reproduce -the problem if possible and applicable. -This information will help us review and fix your issue faster. - -## Development Environment - -### Requirements - -For best results, use a Linux development environment. -The following packages are required to compile libcontainer natively. - -- Golang 1.3 -- GCC -- git -- cgutils - -You can develop on OSX, but you are limited to Dockerfile-based builds only. - -### Building libcontainer from Dockerfile - - make all - -This is the easiest way of building libcontainer. -As this build is done using Docker, you can even run this from [OSX](https://github.com/boot2docker/boot2docker) - -### Testing changes with "nsinit" - - make sh - -This will create an container that runs `nsinit exec sh` on a busybox rootfs with the configuration from ['minimal.json'](https://github.com/docker/libcontainer/blob/master/sample_configs/minimal.json). -Like the previous command, you can run this on OSX too! - -### Building libcontainer directly - -> Note: You should add the `vendor` directory to your GOPATH to use the vendored libraries - - ./update-vendor.sh - go get -d ./... - make direct-build - # Run the tests - make direct-test-short | egrep --color 'FAIL|$' - # Run all the test - make direct-test | egrep --color 'FAIL|$' - -### Testing Changes with "nsinit" directly - -To test a change: - - # Install nsinit - make direct-install - - # Optional, add a docker0 bridge - ip link add docker0 type bridge - ifconfig docker0 172.17.0.1/16 up - - mkdir testfs - curl -sSL https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.02/rootfs.tar | tar -xC testfs - cd testfs - cp container.json - nsinit exec sh - -## Contribution Guidelines - -### Pull requests are always welcome - -We are always thrilled to receive pull requests, and do our best to -process them as fast as possible. Not sure if that typo is worth a pull -request? Do it! We will appreciate it. 
- -If your pull request is not accepted on the first try, don't be -discouraged! If there's a problem with the implementation, hopefully you -received feedback on what to improve. - -We're trying very hard to keep libcontainer lean and focused. We don't want it -to do everything for everybody. This means that we might decide against -incorporating a new feature. However, there might be a way to implement -that feature *on top of* libcontainer. - -### Discuss your design on the mailing list - -We recommend discussing your plans [on the mailing -list](https://groups.google.com/forum/?fromgroups#!forum/libcontainer) -before starting to code - especially for more ambitious contributions. -This gives other contributors a chance to point you in the right -direction, give feedback on your design, and maybe point out if someone -else is working on the same thing. - -### Create issues... - -Any significant improvement should be documented as [a GitHub -issue](https://github.com/docker/libcontainer/issues) before anybody -starts working on it. - -### ...but check for existing issues first! - -Please take a moment to check that an issue doesn't already exist -documenting your bug report or improvement proposal. If it does, it -never hurts to add a quick "+1" or "I have this problem too". This will -help prioritize the most common problems and requests. - -### Conventions - -Fork the repo and make changes on your fork in a feature branch: - -- If it's a bugfix branch, name it XXX-something where XXX is the number of the - issue -- If it's a feature branch, create an enhancement issue to announce your - intentions, and name it XXX-something where XXX is the number of the issue. - -Submit unit tests for your changes. Go has a great test framework built in; use -it! Take a look at existing tests for inspiration. Run the full test suite on -your branch before submitting a pull request. - -Update the documentation when creating or modifying features. Test -your documentation changes for clarity, concision, and correctness, as -well as a clean documentation build. See ``docs/README.md`` for more -information on building the docs and how docs get released. - -Write clean code. Universally formatted code promotes ease of writing, reading, -and maintenance. Always run `gofmt -s -w file.go` on each changed file before -committing your changes. Most editors have plugins that do this automatically. - -Pull requests descriptions should be as clear as possible and include a -reference to all the issues that they address. - -Pull requests must not contain commits from other users or branches. - -Commit messages must start with a capitalized and short summary (max. 50 -chars) written in the imperative, followed by an optional, more detailed -explanatory text which is separated from the summary by an empty line. - -Code review comments may be added to your pull request. Discuss, then make the -suggested modifications and push additional commits to your feature branch. Be -sure to post a comment after pushing. The new commits will show up in the pull -request automatically, but the reviewers will not be notified unless you -comment. - -Before the pull request is merged, make sure that you squash your commits into -logical units of work using `git rebase -i` and `git push -f`. After every -commit the test suite should be passing. Include documentation changes in the -same commit so that a revert would remove all traces of the feature or fix. 
- -Commits that fix or close an issue should include a reference like `Closes #XXX` -or `Fixes #XXX`, which will automatically close the issue when merged. - -### Testing - -Make sure you include suitable tests, preferably unit tests, in your pull request -and that all the tests pass. - -*Instructions for running tests to be added.* - -### Merge approval - -libcontainer maintainers use LGTM (looks good to me) in comments on the code review -to indicate acceptance. - -A change requires LGTMs from at least two maintainers. One of those must come from -a maintainer of the component affected. For example, if a change affects `netlink/` -and `security`, it needs at least one LGTM from a maintainer of each. Maintainers -only need one LGTM as presumably they LGTM their own change. - -For more details see [MAINTAINERS.md](MAINTAINERS.md) - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the -patch, which certifies that you wrote it or otherwise have the right to -pass it on as an open-source patch. The rules are pretty simple: if you -can certify the below (from -[developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -then you just add a line to every git commit message: - - Docker-DCO-1.1-Signed-off-by: Joe Smith (github: github_handle) - -using your real name (sorry, no pseudonyms or anonymous contributions.) - -One way to automate this is to customise your git ``commit.template`` by adding -a ``prepare-commit-msg`` hook to your libcontainer checkout: - -``` -curl -o .git/hooks/prepare-commit-msg https://raw.githubusercontent.com/docker/docker/master/contrib/prepare-commit-msg.hook && chmod +x .git/hooks/prepare-commit-msg -``` - -* Note: the above script expects to find your GitHub user name in ``git config --get github.user`` - -#### Small patch exception - -There are several exceptions to the signing requirement. Currently these are: - -* Your patch fixes spelling or grammar errors. -* Your patch is a single line change to documentation contained in the - `docs` directory.
-* Your patch fixes Markdown formatting or syntax errors in the - documentation contained in the `docs` directory. - -If you have any questions, please refer to the FAQ in the [docs](to be written) - -### How can I become a maintainer? - -* Step 1: learn the component inside out -* Step 2: make yourself useful by contributing code, bugfixes, support etc. -* Step 3: volunteer on the irc channel (#libcontainer@freenode) - -Don't forget: being a maintainer is a time investment. Make sure you will have time to make yourself available. -You don't have to be a maintainer to make a difference on the project! - diff --git a/vendor/github.com/docker/libcontainer/Dockerfile b/vendor/github.com/docker/libcontainer/Dockerfile deleted file mode 100644 index a8854988..00000000 --- a/vendor/github.com/docker/libcontainer/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM golang:1.4 - -RUN echo "deb http://ftp.us.debian.org/debian testing main contrib" >> /etc/apt/sources.list -RUN apt-get update && apt-get install -y iptables criu=1.5.2-1 && rm -rf /var/lib/apt/lists/* - -RUN go get golang.org/x/tools/cmd/cover - -ENV GOPATH $GOPATH:/go/src/github.com/docker/libcontainer/vendor -RUN go get github.com/docker/docker/pkg/term - -# setup a playground for us to spawn containers in -RUN mkdir /busybox && \ - curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.11/rootfs.tar' | tar -xC /busybox - -RUN curl -sSL https://raw.githubusercontent.com/docker/docker/master/hack/dind -o /dind && \ - chmod +x /dind - -COPY . /go/src/github.com/docker/libcontainer -WORKDIR /go/src/github.com/docker/libcontainer -RUN cp sample_configs/minimal.json /busybox/container.json - -RUN make direct-install - -ENTRYPOINT ["/dind"] -CMD ["make", "direct-test"] diff --git a/vendor/github.com/docker/libcontainer/MAINTAINERS b/vendor/github.com/docker/libcontainer/MAINTAINERS deleted file mode 100644 index cea3500f..00000000 --- a/vendor/github.com/docker/libcontainer/MAINTAINERS +++ /dev/null @@ -1,7 +0,0 @@ -Michael Crosby (@crosbymichael) -Rohit Jnagal (@rjnagal) -Victor Marmol (@vmarmol) -Mrunal Patel (@mrunalp) -Alexandr Morozov (@LK4D4) -Daniel, Dao Quang Minh (@dqminh) -update-vendor.sh: Tianon Gravi (@tianon) diff --git a/vendor/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md b/vendor/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md deleted file mode 100644 index 2ac9ca21..00000000 --- a/vendor/github.com/docker/libcontainer/MAINTAINERS_GUIDE.md +++ /dev/null @@ -1,99 +0,0 @@ -# The libcontainer Maintainers' Guide - -## Introduction - -Dear maintainer. Thank you for investing the time and energy to help -make libcontainer as useful as possible. Maintaining a project is difficult, -sometimes unrewarding work. Sure, you will get to contribute cool -features to the project. But most of your time will be spent reviewing, -cleaning up, documenting, answering questions, justifying design -decisions - while everyone has all the fun! But remember - the quality -of the maintainers work is what distinguishes the good projects from the -great. So please be proud of your work, even the unglamourous parts, -and encourage a culture of appreciation and respect for *every* aspect -of improving the project - not just the hot new features. - -This document is a manual for maintainers old and new. It explains what -is expected of maintainers, how they should work, and what tools are -available to them. - -This is a living document - if you see something out of date or missing, -speak up! 
- -## What are a maintainer's responsibility? - -It is every maintainer's responsibility to: - -* 1) Expose a clear roadmap for improving their component. -* 2) Deliver prompt feedback and decisions on pull requests. -* 3) Be available to anyone with questions, bug reports, criticism etc. - on their component. This includes IRC, GitHub requests and the mailing - list. -* 4) Make sure their component respects the philosophy, design and - roadmap of the project. - -## How are decisions made? - -Short answer: with pull requests to the libcontainer repository. - -libcontainer is an open-source project with an open design philosophy. This -means that the repository is the source of truth for EVERY aspect of the -project, including its philosophy, design, roadmap and APIs. *If it's -part of the project, it's in the repo. It's in the repo, it's part of -the project.* - -As a result, all decisions can be expressed as changes to the -repository. An implementation change is a change to the source code. An -API change is a change to the API specification. A philosophy change is -a change to the philosophy manifesto. And so on. - -All decisions affecting libcontainer, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do -this (see below "Who decides what?") - - -## Who decides what? - -All decisions are pull requests, and the relevant maintainers make -decisions by accepting or refusing the pull request. Review and acceptance -by anyone is denoted by adding a comment in the pull request: `LGTM`. -However, only currently listed `MAINTAINERS` are counted towards the required -two LGTMs. - -libcontainer follows the timeless, highly efficient and totally unfair system -known as [Benevolent dictator for life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with Michael Crosby in the role of BDFL. -This means that all decisions are made by default by Michael. Since making -every decision himself would be highly un-scalable, in practice decisions -are spread across multiple maintainers. - -The relevant maintainers for a pull request can be worked out in two steps: - -* Step 1: Determine the subdirectories affected by the pull request. This - might be `netlink/` and `security/`, or any other part of the repo. - -* Step 2: Find the `MAINTAINERS` file which affects this directory. If the - directory itself does not have a `MAINTAINERS` file, work your way up - the repo hierarchy until you find one. - -### I'm a maintainer, and I'm going on holiday - -Please let your co-maintainers and other contributors know by raising a pull -request that comments out your `MAINTAINERS` file entry using a `#`. - -### I'm a maintainer, should I make pull requests too? - -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. - -### Who assigns maintainers? - -Michael has final `LGTM` approval for all pull requests to `MAINTAINERS` files. - -### How is this process changed? - -Just like everything else: by making a pull request :) diff --git a/vendor/github.com/docker/libcontainer/Makefile b/vendor/github.com/docker/libcontainer/Makefile deleted file mode 100644 index 1a2e23e0..00000000 --- a/vendor/github.com/docker/libcontainer/Makefile +++ /dev/null @@ -1,33 +0,0 @@ - -all: - docker build -t dockercore/libcontainer . 
- -test: - # we need NET_ADMIN for the netlink tests and SYS_ADMIN for mounting - docker run --rm -it --privileged dockercore/libcontainer - -sh: - docker run --rm -it --privileged -w /busybox dockercore/libcontainer nsinit exec sh - -GO_PACKAGES = $(shell find . -not \( -wholename ./vendor -prune -o -wholename ./.git -prune \) -name '*.go' -print0 | xargs -0n1 dirname | sort -u) - -direct-test: - go test $(TEST_TAGS) -cover -v $(GO_PACKAGES) - -direct-test-short: - go test $(TEST_TAGS) -cover -test.short -v $(GO_PACKAGES) - -direct-build: - go build -v $(GO_PACKAGES) - -direct-install: - go install -v $(GO_PACKAGES) - -local: - go test -v - -validate: - hack/validate.sh - -binary: all - docker run --rm --privileged -v $(CURDIR)/bundles:/go/bin dockercore/libcontainer make direct-install diff --git a/vendor/github.com/docker/libcontainer/NOTICE b/vendor/github.com/docker/libcontainer/NOTICE deleted file mode 100644 index dc912987..00000000 --- a/vendor/github.com/docker/libcontainer/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -libcontainer -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/libcontainer/PRINCIPLES.md b/vendor/github.com/docker/libcontainer/PRINCIPLES.md deleted file mode 100644 index 05606421..00000000 --- a/vendor/github.com/docker/libcontainer/PRINCIPLES.md +++ /dev/null @@ -1,19 +0,0 @@ -# libcontainer Principles - -In the design and development of libcontainer we try to follow these principles: - -(Work in progress) - -* Don't try to replace every tool. Instead, be an ingredient to improve them. -* Less code is better. -* Fewer components are better. Do you really need to add one more class? -* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. -* Don't do later what you can do now. "//TODO: refactor" is not acceptable in new code. -* When hesitating between two options, choose the one that is easier to reverse. -* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later. -* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. -* The fewer moving parts in a container, the better. -* Don't merge it unless you document it. -* Don't document it unless you can keep it up-to-date. -* Don't merge it unless you test it! -* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/vendor/github.com/docker/libcontainer/README.md b/vendor/github.com/docker/libcontainer/README.md deleted file mode 100644 index 20ae1155..00000000 --- a/vendor/github.com/docker/libcontainer/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# NOTICE - -Please refer to https://github.com/opencontainers/runc/tree/master/libcontainer to make contributions to libcontainer and `runc`. 
- -For more information about the open container project and foundation please refer to: - -http://blog.docker.com/2015/06/open-container-project-foundation/ diff --git a/vendor/github.com/docker/libcontainer/ROADMAP.md b/vendor/github.com/docker/libcontainer/ROADMAP.md deleted file mode 100644 index f5903535..00000000 --- a/vendor/github.com/docker/libcontainer/ROADMAP.md +++ /dev/null @@ -1,20 +0,0 @@ -# libcontainer: what's next? - -This document is a high-level overview of where we want to take libcontainer next. -It is a curated selection of planned improvements which are either important, difficult, or both. - -For a more complete view of planned and requested improvements, see [the Github issues](https://github.com/docker/libcontainer/issues). - -To suggest changes to the roadmap, including additions, please write the change as if it were already in effect, and make a pull request. - -## Broader kernel support - -Our goal is to make libcontainer run everywhere, but currently libcontainer requires Linux version 3.8 or higher. If you’re deploying new machines for the purpose of running libcontainer, this is a fairly easy requirement to meet. However, if you’re adding libcontainer to an existing deployment, you may not have the flexibility to update and patch the kernel. - -## Cross-architecture support - -Our goal is to make libcontainer run everywhere. Recently libcontainer has -expanded from its initial support for x86_64 systems to include POWER (ppc64 -little and big endian variants), IBM System z (s390x 64-bit), and ARM. We plan -to continue expanding architecture support such that libcontainer containers -can be created and used on more architectures. diff --git a/vendor/github.com/docker/libcontainer/SPEC.md b/vendor/github.com/docker/libcontainer/SPEC.md deleted file mode 100644 index 060f07da..00000000 --- a/vendor/github.com/docker/libcontainer/SPEC.md +++ /dev/null @@ -1,334 +0,0 @@ -## Container Specification - v1 - -This is the standard configuration for version 1 containers. It includes -namespaces, standard filesystem setup, a default Linux capability set, and -information about resource reservations. It also has information about any -populated environment settings for the processes running inside a container. - -Along with the configuration of how a container is created the standard also -discusses actions that can be performed on a container to manage and inspect -information about the processes running inside. - -The v1 profile is meant to be able to accommodate the majority of applications -with a strong security configuration. - -### System Requirements and Compatibility - -Minimum requirements: -* Kernel version - 3.10 recommended 2.6.2x minimum(with backported patches) -* Mounted cgroups with each subsystem in its own hierarchy - - -### Namespaces - -| Flag | Enabled | -| ------------ | ------- | -| CLONE_NEWPID | 1 | -| CLONE_NEWUTS | 1 | -| CLONE_NEWIPC | 1 | -| CLONE_NEWNET | 1 | -| CLONE_NEWNS | 1 | -| CLONE_NEWUSER | 1 | - -Namespaces are created for the container via the `clone` syscall. - - -### Filesystem - -A root filesystem must be provided to a container for execution. The container -will use this root filesystem (rootfs) to jail and spawn processes inside where -the binaries and system libraries are local to that directory. Any binaries -to be executed must be contained within this rootfs. 
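The namespaces table earlier in this spec maps one-to-one onto `clone(2)` flags. A rough, Linux-only Go illustration (not libcontainer code) that requests the same namespaces for a child process via `SysProcAttr.Cloneflags`; CLONE_NEWUSER is enabled in the table as well, but it normally needs uid/gid mappings written before the child can do anything useful, so it is left out of this sketch:

```go
package main

import (
	"os"
	"os/exec"
	"syscall"
)

// Spawn a shell inside freshly created PID, UTS, IPC, network, and mount
// namespaces, mirroring the v1 namespace table.
func main() {
	cmd := exec.Command("/bin/sh")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWPID |
			syscall.CLONE_NEWUTS |
			syscall.CLONE_NEWIPC |
			syscall.CLONE_NEWNET |
			syscall.CLONE_NEWNS,
	}
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```

Run it as root; unprivileged callers will get EPERM for most of these flags.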
- -Mounts that happen inside the container are automatically cleaned up when the -container exits as the mount namespace is destroyed and the kernel will -unmount all the mounts that were set up within that namespace. - -For a container to execute properly there are certain filesystems that -are required to be mounted within the rootfs that the runtime will set up. - -| Path | Type | Flags | Data | -| ----------- | ------ | -------------------------------------- | ---------------------------------------- | -| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | | -| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 | -| /dev/shm | tmpfs | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k | -| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | | -| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 | -| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | | - - -After a container's filesystems are mounted within the newly created -mount namespace `/dev` will need to be populated with a set of device nodes. -It is expected that a rootfs does not need to have any device nodes specified -for `/dev` within the rootfs as the container will set up the correct devices -that are required for executing a container's process. - -| Path | Mode | Access | -| ------------ | ---- | ---------- | -| /dev/null | 0666 | rwm | -| /dev/zero | 0666 | rwm | -| /dev/full | 0666 | rwm | -| /dev/tty | 0666 | rwm | -| /dev/random | 0666 | rwm | -| /dev/urandom | 0666 | rwm | -| /dev/fuse | 0666 | rwm | - - -**ptmx** -`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within -the container. - -The use of a pseudo TTY is optional within a container and it should support both. -If a pseudo TTY is provided to the container `/dev/console` will need to be -set up by binding the console in `/dev/` after it has been populated and mounted -in tmpfs. - -| Source | Destination | UID GID | Mode | Type | -| --------------- | ------------ | ------- | ---- | ---- | -| *pty host path* | /dev/console | 0 0 | 0600 | bind | - - -After `/dev/null` has been set up we check for any external links between -the container's io, STDIN, STDOUT, STDERR. If the container's io is pointing -to `/dev/null` outside the container we close and `dup2` the `/dev/null` -that is local to the container's rootfs. - - -After the container has `/proc` mounted a few standard symlinks are set up -within `/dev/` for the io. - -| Source | Destination | -| ------------ | ----------- | -| /proc/1/fd | /dev/fd | -| /proc/1/fd/0 | /dev/stdin | -| /proc/1/fd/1 | /dev/stdout | -| /proc/1/fd/2 | /dev/stderr | - -A `pivot_root` is used to change the root for the process, effectively -jailing the process inside the rootfs. - -```c -put_old = mkdir(...); -pivot_root(rootfs, put_old); -chdir("/"); -umount(put_old, MNT_DETACH); -rmdir(put_old); -``` - -For containers running with a rootfs inside `ramfs` a `MS_MOVE` combined -with a `chroot` is required as `pivot_root` is not supported in `ramfs`. - -```c -mount(rootfs, "/", NULL, MS_MOVE, NULL); -chroot("."); -chdir("/"); -``` - -The `umask` is set back to `0022` after the filesystem setup has been completed. - -### Resources - -Cgroups are used to handle resource allocation for containers. This includes -system resources like cpu, memory, and device access.
- -| Subsystem | Enabled | -| ---------- | ------- | -| devices | 1 | -| memory | 1 | -| cpu | 1 | -| cpuacct | 1 | -| cpuset | 1 | -| blkio | 1 | -| perf_event | 1 | -| freezer | 1 | -| hugetlb | 1 | - - -All cgroup subsystems are joined so that statistics can be collected from -each of the subsystems. Freezer does not expose any stats but is joined -so that containers can be paused and resumed. - -The parent process of the container's init must place the init pid inside -the correct cgroups before the initialization begins. This is done so -that no processes or threads escape the cgroups. This sync is -done via a pipe ( specified in the runtime section below ) that the container's -init process will block waiting for the parent to finish setup. - -### Security - -The standard set of Linux capabilities that are set in a container -provide a good default for security and flexibility for the applications. - - -| Capability | Enabled | -| -------------------- | ------- | -| CAP_NET_RAW | 1 | -| CAP_NET_BIND_SERVICE | 1 | -| CAP_AUDIT_READ | 1 | -| CAP_AUDIT_WRITE | 1 | -| CAP_DAC_OVERRIDE | 1 | -| CAP_SETFCAP | 1 | -| CAP_SETPCAP | 1 | -| CAP_SETGID | 1 | -| CAP_SETUID | 1 | -| CAP_MKNOD | 1 | -| CAP_CHOWN | 1 | -| CAP_FOWNER | 1 | -| CAP_FSETID | 1 | -| CAP_KILL | 1 | -| CAP_SYS_CHROOT | 1 | -| CAP_NET_BROADCAST | 0 | -| CAP_SYS_MODULE | 0 | -| CAP_SYS_RAWIO | 0 | -| CAP_SYS_PACCT | 0 | -| CAP_SYS_ADMIN | 0 | -| CAP_SYS_NICE | 0 | -| CAP_SYS_RESOURCE | 0 | -| CAP_SYS_TIME | 0 | -| CAP_SYS_TTY_CONFIG | 0 | -| CAP_AUDIT_CONTROL | 0 | -| CAP_MAC_OVERRIDE | 0 | -| CAP_MAC_ADMIN | 0 | -| CAP_NET_ADMIN | 0 | -| CAP_SYSLOG | 0 | -| CAP_DAC_READ_SEARCH | 0 | -| CAP_LINUX_IMMUTABLE | 0 | -| CAP_IPC_LOCK | 0 | -| CAP_IPC_OWNER | 0 | -| CAP_SYS_PTRACE | 0 | -| CAP_SYS_BOOT | 0 | -| CAP_LEASE | 0 | -| CAP_WAKE_ALARM | 0 | -| CAP_BLOCK_SUSPEND | 0 | - - -Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor) -and [selinux](http://selinuxproject.org/page/Main_Page) can be used with -the containers. A container should support setting an apparmor profile or -selinux process and mount labels if provided in the configuration. - -Standard apparmor profile: -```c -#include <tunables/global> -profile flags=(attach_disconnected,mediate_deleted) { - #include <abstractions/base> - network, - capability, - file, - umount, - - deny @{PROC}/sys/fs/** wklx, - deny @{PROC}/sysrq-trigger rwklx, - deny @{PROC}/mem rwklx, - deny @{PROC}/kmem rwklx, - deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx, - deny @{PROC}/sys/kernel/*/** wklx, - - deny mount, - - deny /sys/[^f]*/** wklx, - deny /sys/f[^s]*/** wklx, - deny /sys/fs/[^c]*/** wklx, - deny /sys/fs/c[^g]*/** wklx, - deny /sys/fs/cg[^r]*/** wklx, - deny /sys/firmware/efi/efivars/** rwklx, - deny /sys/kernel/security/** rwklx, -} -``` - -*TODO: seccomp work is being done to find a good default config* - -### Runtime and Init Process - -During container creation the parent process needs to talk to the container's init -process and have a form of synchronization. This is accomplished by creating -a pipe that is passed to the container's init. When the init process first spawns -it will block on its side of the pipe until the parent closes its side. This -allows the parent to have time to set the new process inside a cgroup hierarchy -and/or write any uid/gid mappings required for user namespaces. -The pipe is passed to the init process via FD 3. - -The application consuming libcontainer should be compiled statically.
libcontainer -does not define any init process and the arguments provided are used to `exec` the -process inside the application. There should be no long running init within the -container spec. - -If a pseudo tty is provided to a container it will open and `dup2` the console -as the container's STDIN, STDOUT, STDERR as well as mounting the console -as `/dev/console`. - -An extra set of mounts are provided to a container and setup for use. A container's -rootfs can contain some non portable files inside that can cause side effects during -execution of a process. These files are usually created and populated with the container -specific information via the runtime. - -**Extra runtime files:** -* /etc/hosts -* /etc/resolv.conf -* /etc/hostname -* /etc/localtime - - -#### Defaults - -There are a few defaults that can be overridden by users, but in their omission -these apply to processes within a container. - -| Type | Value | -| ------------------- | ------------------------------ | -| Parent Death Signal | SIGKILL | -| UID | 0 | -| GID | 0 | -| GROUPS | 0, NULL | -| CWD | "/" | -| $HOME | Current user's home dir or "/" | -| Readonly rootfs | false | -| Pseudo TTY | false | - - -## Actions - -After a container is created there is a standard set of actions that can -be done to the container. These actions are part of the public API for -a container. - -| Action | Description | -| -------------- | ------------------------------------------------------------------ | -| Get processes | Return all the pids for processes running inside a container | -| Get Stats | Return resource statistics for the container as a whole | -| Wait | Wait waits on the container's init process ( pid 1 ) | -| Wait Process | Wait on any of the container's processes returning the exit status | -| Destroy | Kill the container's init process and remove any filesystem state | -| Signal | Send a signal to the container's init process | -| Signal Process | Send a signal to any of the container's processes | -| Pause | Pause all processes inside the container | -| Resume | Resume all processes inside the container if paused | -| Exec | Execute a new process inside of the container ( requires setns ) | -| Set | Setup configs of the container after it's created | - -### Execute a new process inside of a running container. - -User can execute a new process inside of a running container. Any binaries to be -executed must be accessible within the container's rootfs. - -The started process will run inside the container's rootfs. Any changes -made by the process to the container's filesystem will persist after the -process finished executing. - -The started process will join all the container's existing namespaces. When the -container is paused, the process will also be paused and will resume when -the container is unpaused. The started process will only run when the container's -primary process (PID 1) is running, and will not be restarted when the container -is restarted. - -#### Planned additions - -The started process will have its own cgroups nested inside the container's -cgroups. This is used for process tracking and optionally resource allocation -handling for the new process. Freezer cgroup is required, the rest of the cgroups -are optional. The process executor must place its pid inside the correct -cgroups before starting the process. This is done so that no child processes or -threads can escape the cgroups. 
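The sync pipe described in the runtime section above can be sketched from the parent's side in a few lines of Go; `startWithSyncPipe` and `initPath` are hypothetical names and the cgroup/uid-map work is elided, so this shows the handshake only, not libcontainer's actual implementation:

```go
package sketch

import (
	"os"
	"os/exec"
)

// startWithSyncPipe starts an init binary with the read end of a pipe as
// FD 3. The child blocks reading that FD; the parent closing the write end
// is the "setup finished" signal.
func startWithSyncPipe(initPath string) error {
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	cmd := exec.Command(initPath)
	cmd.ExtraFiles = []*os.File{r} // the first extra file becomes FD 3 in the child
	if err := cmd.Start(); err != nil {
		return err
	}
	r.Close() // the parent keeps only the write end

	// ... place cmd.Process.Pid into the container's cgroups and write any
	// uid/gid mappings here, while the child is still blocked on FD 3 ...

	w.Close() // EOF releases the child's init process
	return cmd.Wait()
}
```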
- -When the process is stopped, the process executor will try (in a best-effort way) -to stop all its children and remove the sub-cgroups. diff --git a/vendor/github.com/docker/libcontainer/netlink/MAINTAINERS b/vendor/github.com/docker/libcontainer/netlink/MAINTAINERS deleted file mode 100644 index 1cb55136..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Michael Crosby (@crosbymichael) -Guillaume J. Charmes (@creack) diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink.go b/vendor/github.com/docker/libcontainer/netlink/netlink.go deleted file mode 100644 index 90883660..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink.go +++ /dev/null @@ -1,31 +0,0 @@ -// Packet netlink provide access to low level Netlink sockets and messages. -// -// Actual implementations are in: -// netlink_linux.go -// netlink_darwin.go -package netlink - -import ( - "errors" - "net" -) - -var ( - ErrWrongSockType = errors.New("Wrong socket type") - ErrShortResponse = errors.New("Got short response from netlink") - ErrInterfaceExists = errors.New("Network interface already exists") -) - -// A Route is a subnet associated with the interface to reach it. -type Route struct { - *net.IPNet - Iface *net.Interface - Default bool -} - -// An IfAddr defines IP network settings for a given network interface -type IfAddr struct { - Iface *net.Interface - IP net.IP - IPNet *net.IPNet -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_linux.go b/vendor/github.com/docker/libcontainer/netlink/netlink_linux.go deleted file mode 100644 index c81c269e..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_linux.go +++ /dev/null @@ -1,1321 +0,0 @@ -package netlink - -import ( - "encoding/binary" - "fmt" - "io" - "math/rand" - "net" - "os" - "sync/atomic" - "syscall" - "time" - "unsafe" -) - -const ( - IFNAMSIZ = 16 - DEFAULT_CHANGE = 0xFFFFFFFF - IFLA_INFO_KIND = 1 - IFLA_INFO_DATA = 2 - VETH_INFO_PEER = 1 - IFLA_MACVLAN_MODE = 1 - IFLA_VLAN_ID = 1 - IFLA_NET_NS_FD = 28 - IFLA_ADDRESS = 1 - IFLA_BRPORT_MODE = 4 - SIOC_BRADDBR = 0x89a0 - SIOC_BRDELBR = 0x89a1 - SIOC_BRADDIF = 0x89a2 - SIOC_BRDELIF = 0x89a3 -) - -const ( - MACVLAN_MODE_PRIVATE = 1 << iota - MACVLAN_MODE_VEPA - MACVLAN_MODE_BRIDGE - MACVLAN_MODE_PASSTHRU -) - -var nextSeqNr uint32 - -type ifreqHwaddr struct { - IfrnName [IFNAMSIZ]byte - IfruHwaddr syscall.RawSockaddr -} - -type ifreqIndex struct { - IfrnName [IFNAMSIZ]byte - IfruIndex int32 -} - -type ifreqFlags struct { - IfrnName [IFNAMSIZ]byte - Ifruflags uint16 -} - -var native binary.ByteOrder - -var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) - -func init() { - var x uint32 = 0x01020304 - if *(*byte)(unsafe.Pointer(&x)) == 0x01 { - native = binary.BigEndian - } else { - native = binary.LittleEndian - } -} - -func getIpFamily(ip net.IP) int { - if len(ip) <= net.IPv4len { - return syscall.AF_INET - } - if ip.To4() != nil { - return syscall.AF_INET - } - return syscall.AF_INET6 -} - -type NetlinkRequestData interface { - Len() int - ToWireFormat() []byte -} - -type IfInfomsg struct { - syscall.IfInfomsg -} - -func newIfInfomsg(family int) *IfInfomsg { - return &IfInfomsg{ - IfInfomsg: syscall.IfInfomsg{ - Family: uint8(family), - }, - } -} - -func newIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { - msg := newIfInfomsg(family) - parent.children = append(parent.children, msg) - return msg -} - -func (msg *IfInfomsg) ToWireFormat() []byte { - length := syscall.SizeofIfInfomsg - b 
:= make([]byte, length) - b[0] = msg.Family - b[1] = 0 - native.PutUint16(b[2:4], msg.Type) - native.PutUint32(b[4:8], uint32(msg.Index)) - native.PutUint32(b[8:12], msg.Flags) - native.PutUint32(b[12:16], msg.Change) - return b -} - -func (msg *IfInfomsg) Len() int { - return syscall.SizeofIfInfomsg -} - -type IfAddrmsg struct { - syscall.IfAddrmsg -} - -func newIfAddrmsg(family int) *IfAddrmsg { - return &IfAddrmsg{ - IfAddrmsg: syscall.IfAddrmsg{ - Family: uint8(family), - }, - } -} - -func (msg *IfAddrmsg) ToWireFormat() []byte { - length := syscall.SizeofIfAddrmsg - b := make([]byte, length) - b[0] = msg.Family - b[1] = msg.Prefixlen - b[2] = msg.Flags - b[3] = msg.Scope - native.PutUint32(b[4:8], msg.Index) - return b -} - -func (msg *IfAddrmsg) Len() int { - return syscall.SizeofIfAddrmsg -} - -type RtMsg struct { - syscall.RtMsg -} - -func newRtMsg() *RtMsg { - return &RtMsg{ - RtMsg: syscall.RtMsg{ - Table: syscall.RT_TABLE_MAIN, - Scope: syscall.RT_SCOPE_UNIVERSE, - Protocol: syscall.RTPROT_BOOT, - Type: syscall.RTN_UNICAST, - }, - } -} - -func (msg *RtMsg) ToWireFormat() []byte { - length := syscall.SizeofRtMsg - b := make([]byte, length) - b[0] = msg.Family - b[1] = msg.Dst_len - b[2] = msg.Src_len - b[3] = msg.Tos - b[4] = msg.Table - b[5] = msg.Protocol - b[6] = msg.Scope - b[7] = msg.Type - native.PutUint32(b[8:12], msg.Flags) - return b -} - -func (msg *RtMsg) Len() int { - return syscall.SizeofRtMsg -} - -func rtaAlignOf(attrlen int) int { - return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) -} - -type RtAttr struct { - syscall.RtAttr - Data []byte - children []NetlinkRequestData -} - -func newRtAttr(attrType int, data []byte) *RtAttr { - return &RtAttr{ - RtAttr: syscall.RtAttr{ - Type: uint16(attrType), - }, - children: []NetlinkRequestData{}, - Data: data, - } -} - -func newRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { - attr := newRtAttr(attrType, data) - parent.children = append(parent.children, attr) - return attr -} - -func (a *RtAttr) Len() int { - if len(a.children) == 0 { - return (syscall.SizeofRtAttr + len(a.Data)) - } - - l := 0 - for _, child := range a.children { - l += child.Len() - } - l += syscall.SizeofRtAttr - return rtaAlignOf(l + len(a.Data)) -} - -func (a *RtAttr) ToWireFormat() []byte { - length := a.Len() - buf := make([]byte, rtaAlignOf(length)) - - if a.Data != nil { - copy(buf[4:], a.Data) - } else { - next := 4 - for _, child := range a.children { - childBuf := child.ToWireFormat() - copy(buf[next:], childBuf) - next += rtaAlignOf(len(childBuf)) - } - } - - if l := uint16(length); l != 0 { - native.PutUint16(buf[0:2], l) - } - native.PutUint16(buf[2:4], a.Type) - return buf -} - -func uint32Attr(t int, n uint32) *RtAttr { - buf := make([]byte, 4) - native.PutUint32(buf, n) - return newRtAttr(t, buf) -} - -type NetlinkRequest struct { - syscall.NlMsghdr - Data []NetlinkRequestData -} - -func (rr *NetlinkRequest) ToWireFormat() []byte { - length := rr.Len - dataBytes := make([][]byte, len(rr.Data)) - for i, data := range rr.Data { - dataBytes[i] = data.ToWireFormat() - length += uint32(len(dataBytes[i])) - } - b := make([]byte, length) - native.PutUint32(b[0:4], length) - native.PutUint16(b[4:6], rr.Type) - native.PutUint16(b[6:8], rr.Flags) - native.PutUint32(b[8:12], rr.Seq) - native.PutUint32(b[12:16], rr.Pid) - - next := 16 - for _, data := range dataBytes { - copy(b[next:], data) - next += len(data) - } - return b -} - -func (rr *NetlinkRequest) AddData(data NetlinkRequestData) { - if data != nil { - 
rr.Data = append(rr.Data, data) - } -} - -func newNetlinkRequest(proto, flags int) *NetlinkRequest { - return &NetlinkRequest{ - NlMsghdr: syscall.NlMsghdr{ - Len: uint32(syscall.NLMSG_HDRLEN), - Type: uint16(proto), - Flags: syscall.NLM_F_REQUEST | uint16(flags), - Seq: atomic.AddUint32(&nextSeqNr, 1), - }, - } -} - -type NetlinkSocket struct { - fd int - lsa syscall.SockaddrNetlink -} - -func getNetlinkSocket() (*NetlinkSocket, error) { - fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_ROUTE) - if err != nil { - return nil, err - } - s := &NetlinkSocket{ - fd: fd, - } - s.lsa.Family = syscall.AF_NETLINK - if err := syscall.Bind(fd, &s.lsa); err != nil { - syscall.Close(fd) - return nil, err - } - - return s, nil -} - -func (s *NetlinkSocket) Close() { - syscall.Close(s.fd) -} - -func (s *NetlinkSocket) Send(request *NetlinkRequest) error { - if err := syscall.Sendto(s.fd, request.ToWireFormat(), 0, &s.lsa); err != nil { - return err - } - return nil -} - -func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { - rb := make([]byte, syscall.Getpagesize()) - nr, _, err := syscall.Recvfrom(s.fd, rb, 0) - if err != nil { - return nil, err - } - if nr < syscall.NLMSG_HDRLEN { - return nil, ErrShortResponse - } - rb = rb[:nr] - return syscall.ParseNetlinkMessage(rb) -} - -func (s *NetlinkSocket) GetPid() (uint32, error) { - lsa, err := syscall.Getsockname(s.fd) - if err != nil { - return 0, err - } - switch v := lsa.(type) { - case *syscall.SockaddrNetlink: - return v.Pid, nil - } - return 0, ErrWrongSockType -} - -func (s *NetlinkSocket) CheckMessage(m syscall.NetlinkMessage, seq, pid uint32) error { - if m.Header.Seq != seq { - return fmt.Errorf("netlink: invalid seq %d, expected %d", m.Header.Seq, seq) - } - if m.Header.Pid != pid { - return fmt.Errorf("netlink: wrong pid %d, expected %d", m.Header.Pid, pid) - } - if m.Header.Type == syscall.NLMSG_DONE { - return io.EOF - } - if m.Header.Type == syscall.NLMSG_ERROR { - e := int32(native.Uint32(m.Data[0:4])) - if e == 0 { - return io.EOF - } - return syscall.Errno(-e) - } - return nil -} - -func (s *NetlinkSocket) HandleAck(seq uint32) error { - pid, err := s.GetPid() - if err != nil { - return err - } - -outer: - for { - msgs, err := s.Receive() - if err != nil { - return err - } - for _, m := range msgs { - if err := s.CheckMessage(m, seq, pid); err != nil { - if err == io.EOF { - break outer - } - return err - } - } - } - - return nil -} - -func zeroTerminated(s string) []byte { - return []byte(s + "\000") -} - -func nonZeroTerminated(s string) []byte { - return []byte(s) -} - -// Add a new network link of a specified type. -// This is identical to running: ip link add $name type $linkType -func NetworkLinkAdd(name string, linkType string) error { - if name == "" || linkType == "" { - return fmt.Errorf("Neither link name nor link type can be empty!") - } - - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - linkInfo := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(linkInfo, IFLA_INFO_KIND, nonZeroTerminated(linkType)) - wb.AddData(linkInfo) - - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name)) - wb.AddData(nameData) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Delete a network link. 
-// This is identical to running: ip link del $name -func NetworkLinkDel(name string) error { - if name == "" { - return fmt.Errorf("Network link name can not be empty!") - } - - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - iface, err := net.InterfaceByName(name) - if err != nil { - return err - } - - wb := newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Bring up a particular network interface. -// This is identical to running: ip link set dev $name up -func NetworkLinkUp(iface *net.Interface) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - msg.Flags = syscall.IFF_UP - msg.Change = syscall.IFF_UP - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Bring down a particular network interface. -// This is identical to running: ip link set $name down -func NetworkLinkDown(iface *net.Interface) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - msg.Flags = 0 & ^syscall.IFF_UP - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Set link layer address, i.e. MAC address. -// This is identical to running: ip link set dev $name address $macaddress -func NetworkSetMacAddress(iface *net.Interface, macaddr string) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - hwaddr, err := net.ParseMAC(macaddr) - if err != nil { - return err - } - - var ( - MULTICAST byte = 0x1 - ) - - if hwaddr[0]&0x1 == MULTICAST { - return fmt.Errorf("Multicast MAC Address is not supported: %s", macaddr) - } - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - macdata := make([]byte, 6) - copy(macdata, hwaddr) - data := newRtAttr(IFLA_ADDRESS, macdata) - wb.AddData(data) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// Set link Maximum Transmission Unit -// This is identical to running: ip link set dev $name mtu $MTU -// bridge is a bitch here https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=292088 -// https://bugzilla.redhat.com/show_bug.cgi?id=697021 -// There is a discussion about how to deal with ifcs joining bridge with MTU > 1500 -// Regular network interfaces do seem to work though!
-func NetworkSetMTU(iface *net.Interface, mtu int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - wb.AddData(uint32Attr(syscall.IFLA_MTU, uint32(mtu))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// Set link queue length -// This is identical to running: ip link set dev $name txqueuelen $QLEN -func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - wb.AddData(uint32Attr(syscall.IFLA_TXQLEN, uint32(txQueueLen))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -func networkMasterAction(iface *net.Interface, rtattr *RtAttr) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - wb.AddData(rtattr) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Add an interface to bridge. -// This is identical to running: ip link set $name master $master -func NetworkSetMaster(iface, master *net.Interface) error { - data := uint32Attr(syscall.IFLA_MASTER, uint32(master.Index)) - return networkMasterAction(iface, data) -} - -// Remove an interface from the bridge -// This is identical to running: ip link set $name nomaster -func NetworkSetNoMaster(iface *net.Interface) error { - data := uint32Attr(syscall.IFLA_MASTER, 0) - return networkMasterAction(iface, data) -} - -func networkSetNsAction(iface *net.Interface, rtattr *RtAttr) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - wb.AddData(msg) - wb.AddData(rtattr) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Move a particular network interface to a particular network namespace -// specified by PID. This is identical to running: ip link set dev $name netns $pid -func NetworkSetNsPid(iface *net.Interface, nspid int) error { - data := uint32Attr(syscall.IFLA_NET_NS_PID, uint32(nspid)) - return networkSetNsAction(iface, data) -} - -// Move a particular network interface to a particular mounted -// network namespace specified by file descriptor. -// This is identical to running: ip link set dev $name netns $fd -func NetworkSetNsFd(iface *net.Interface, fd int) error { - data := uint32Attr(IFLA_NET_NS_FD, uint32(fd)) - return networkSetNsAction(iface, data) -} - -// Rename a particular interface to a different name -// !!! Note that you can't rename an active interface. You need to bring it down before renaming it.
-// This is identical to running: ip link set dev ${oldName} name ${newName} -func NetworkChangeName(iface *net.Interface, newName string) error { - if len(newName) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", newName) - } - - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - wb.AddData(msg) - - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(newName)) - wb.AddData(nameData) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Add a new VETH pair link on the host -// This is identical to running: ip link add name $name type veth peer name $peername -func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - nameData := newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(name1)) - wb.AddData(nameData) - - txqLen := make([]byte, 4) - native.PutUint32(txqLen, uint32(txQueueLen)) - txqData := newRtAttr(syscall.IFLA_TXQLEN, txqLen) - wb.AddData(txqData) - - nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, zeroTerminated("veth")) - nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) - nest3 := newRtAttrChild(nest2, VETH_INFO_PEER, nil) - - newIfInfomsgChild(nest3, syscall.AF_UNSPEC) - newRtAttrChild(nest3, syscall.IFLA_IFNAME, zeroTerminated(name2)) - - txqLen2 := make([]byte, 4) - native.PutUint32(txqLen2, uint32(txQueueLen)) - newRtAttrChild(nest3, syscall.IFLA_TXQLEN, txqLen2) - - wb.AddData(nest1) - - if err := s.Send(wb); err != nil { - return err - } - - if err := s.HandleAck(wb.Seq); err != nil { - if os.IsExist(err) { - return ErrInterfaceExists - } - - return err - } - - return nil -} - -// Add a new VLAN interface with masterDev as its upper device -// This is identical to running: -// ip link add name $name link $masterdev type vlan id $id -func NetworkLinkAddVlan(masterDev, vlanDev string, vlanId uint16) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - masterDevIfc, err := net.InterfaceByName(masterDev) - if err != nil { - return err - } - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated("vlan")) - - nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) - vlanData := make([]byte, 2) - native.PutUint16(vlanData, vlanId) - newRtAttrChild(nest2, IFLA_VLAN_ID, vlanData) - wb.AddData(nest1) - - wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) - wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(vlanDev))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// MacVlan link has LowerDev, UpperDev and operates in Mode mode -// This simplifies the code when creating MacVlan or MacVtap interface -type MacVlanLink struct { - MasterDev string - SlaveDev string - mode string -} - -func (m MacVlanLink) Mode() uint32 { - modeMap := map[string]uint32{ - "private": MACVLAN_MODE_PRIVATE, - "vepa": 
MACVLAN_MODE_VEPA, - "bridge": MACVLAN_MODE_BRIDGE, - "passthru": MACVLAN_MODE_PASSTHRU, - } - - return modeMap[m.mode] -} - -// Add MAC VLAN network interface with masterDev as its upper device -// This is identical to running: -// ip link add name $name link $masterdev type macvlan mode $mode -func networkLinkMacVlan(dev_type string, mcvln *MacVlanLink) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - - masterDevIfc, err := net.InterfaceByName(mcvln.MasterDev) - if err != nil { - return err - } - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - nest1 := newRtAttr(syscall.IFLA_LINKINFO, nil) - newRtAttrChild(nest1, IFLA_INFO_KIND, nonZeroTerminated(dev_type)) - - nest2 := newRtAttrChild(nest1, IFLA_INFO_DATA, nil) - macVlanData := make([]byte, 4) - native.PutUint32(macVlanData, mcvln.Mode()) - newRtAttrChild(nest2, IFLA_MACVLAN_MODE, macVlanData) - wb.AddData(nest1) - - wb.AddData(uint32Attr(syscall.IFLA_LINK, uint32(masterDevIfc.Index))) - wb.AddData(newRtAttr(syscall.IFLA_IFNAME, zeroTerminated(mcvln.SlaveDev))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -func NetworkLinkAddMacVlan(masterDev, macVlanDev string, mode string) error { - return networkLinkMacVlan("macvlan", &MacVlanLink{ - MasterDev: masterDev, - SlaveDev: macVlanDev, - mode: mode, - }) -} - -func NetworkLinkAddMacVtap(masterDev, macVlanDev string, mode string) error { - return networkLinkMacVlan("macvtap", &MacVlanLink{ - MasterDev: masterDev, - SlaveDev: macVlanDev, - mode: mode, - }) -} - -func networkLinkIpAction(action, flags int, ifa IfAddr) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - family := getIpFamily(ifa.IP) - - wb := newNetlinkRequest(action, flags) - - msg := newIfAddrmsg(family) - msg.Index = uint32(ifa.Iface.Index) - prefixLen, _ := ifa.IPNet.Mask.Size() - msg.Prefixlen = uint8(prefixLen) - wb.AddData(msg) - - var ipData []byte - if family == syscall.AF_INET { - ipData = ifa.IP.To4() - } else { - ipData = ifa.IP.To16() - } - - localData := newRtAttr(syscall.IFA_LOCAL, ipData) - wb.AddData(localData) - - addrData := newRtAttr(syscall.IFA_ADDRESS, ipData) - wb.AddData(addrData) - - if err := s.Send(wb); err != nil { - return err - } - - return s.HandleAck(wb.Seq) -} - -// Delete an IP address from an interface. This is identical to: -// ip addr del $ip/$ipNet dev $iface -func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return networkLinkIpAction( - syscall.RTM_DELADDR, - syscall.NLM_F_ACK, - IfAddr{iface, ip, ipNet}, - ) -} - -// Add an Ip address to an interface. 
This is identical to: -// ip addr add $ip/$ipNet dev $iface -func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return networkLinkIpAction( - syscall.RTM_NEWADDR, - syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK, - IfAddr{iface, ip, ipNet}, - ) -} - -// Returns an array of IPNet for all the currently routed subnets on ipv4 -// This is similar to the first column of "ip route" output -func NetworkGetRoutes() ([]Route, error) { - s, err := getNetlinkSocket() - if err != nil { - return nil, err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) - - msg := newIfInfomsg(syscall.AF_UNSPEC) - wb.AddData(msg) - - if err := s.Send(wb); err != nil { - return nil, err - } - - pid, err := s.GetPid() - if err != nil { - return nil, err - } - - res := make([]Route, 0) - -outer: - for { - msgs, err := s.Receive() - if err != nil { - return nil, err - } - for _, m := range msgs { - if err := s.CheckMessage(m, wb.Seq, pid); err != nil { - if err == io.EOF { - break outer - } - return nil, err - } - if m.Header.Type != syscall.RTM_NEWROUTE { - continue - } - - var r Route - - msg := (*RtMsg)(unsafe.Pointer(&m.Data[0:syscall.SizeofRtMsg][0])) - - if msg.Flags&syscall.RTM_F_CLONED != 0 { - // Ignore cloned routes - continue - } - - if msg.Table != syscall.RT_TABLE_MAIN { - // Ignore non-main tables - continue - } - - if msg.Family != syscall.AF_INET { - // Ignore non-ipv4 routes - continue - } - - if msg.Dst_len == 0 { - // Default routes - r.Default = true - } - - attrs, err := syscall.ParseNetlinkRouteAttr(&m) - if err != nil { - return nil, err - } - for _, attr := range attrs { - switch attr.Attr.Type { - case syscall.RTA_DST: - ip := attr.Value - r.IPNet = &net.IPNet{ - IP: ip, - Mask: net.CIDRMask(int(msg.Dst_len), 8*len(ip)), - } - case syscall.RTA_OIF: - index := int(native.Uint32(attr.Value[0:4])) - r.Iface, _ = net.InterfaceByIndex(index) - } - } - if r.Default || r.IPNet != nil { - res = append(res, r) - } - } - } - - return res, nil -} - -// Add a new route table entry. 
-func AddRoute(destination, source, gateway, device string) error { - if destination == "" && source == "" && gateway == "" { - return fmt.Errorf("one of destination, source or gateway must not be blank") - } - - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - - wb := newNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) - msg := newRtMsg() - currentFamily := -1 - var rtAttrs []*RtAttr - - if destination != "" { - destIP, destNet, err := net.ParseCIDR(destination) - if err != nil { - return fmt.Errorf("destination CIDR %s couldn't be parsed", destination) - } - destFamily := getIpFamily(destIP) - currentFamily = destFamily - destLen, bits := destNet.Mask.Size() - if destLen == 0 && bits == 0 { - return fmt.Errorf("destination CIDR %s generated a non-canonical Mask", destination) - } - msg.Family = uint8(destFamily) - msg.Dst_len = uint8(destLen) - var destData []byte - if destFamily == syscall.AF_INET { - destData = destIP.To4() - } else { - destData = destIP.To16() - } - rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_DST, destData)) - } - - if source != "" { - srcIP := net.ParseIP(source) - if srcIP == nil { - return fmt.Errorf("source IP %s couldn't be parsed", source) - } - srcFamily := getIpFamily(srcIP) - if currentFamily != -1 && currentFamily != srcFamily { - return fmt.Errorf("source and destination ip were not the same IP family") - } - currentFamily = srcFamily - msg.Family = uint8(srcFamily) - var srcData []byte - if srcFamily == syscall.AF_INET { - srcData = srcIP.To4() - } else { - srcData = srcIP.To16() - } - rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_PREFSRC, srcData)) - } - - if gateway != "" { - gwIP := net.ParseIP(gateway) - if gwIP == nil { - return fmt.Errorf("gateway IP %s couldn't be parsed", gateway) - } - gwFamily := getIpFamily(gwIP) - if currentFamily != -1 && currentFamily != gwFamily { - return fmt.Errorf("gateway, source, and destination ip were not the same IP family") - } - msg.Family = uint8(gwFamily) - var gwData []byte - if gwFamily == syscall.AF_INET { - gwData = gwIP.To4() - } else { - gwData = gwIP.To16() - } - rtAttrs = append(rtAttrs, newRtAttr(syscall.RTA_GATEWAY, gwData)) - } - - wb.AddData(msg) - for _, attr := range rtAttrs { - wb.AddData(attr) - } - - iface, err := net.InterfaceByName(device) - if err != nil { - return err - } - wb.AddData(uint32Attr(syscall.RTA_OIF, uint32(iface.Index))) - - if err := s.Send(wb); err != nil { - return err - } - return s.HandleAck(wb.Seq) -} - -// Add a new default gateway. Identical to: -// ip route add default via $ip -func AddDefaultGw(ip, device string) error { - return AddRoute("", "", ip, device) -} - -// THIS CODE DOES NOT COMMUNICATE WITH KERNEL VIA RTNETLINK INTERFACE -// IT IS HERE FOR BACKWARDS COMPATIBILITY WITH OLDER LINUX KERNELS -// WHICH SHIP WITH OLDER NOT ENTIRELY FUNCTIONAL VERSION OF NETLINK -func getIfSocket() (fd int, err error) { - for _, socket := range []int{ - syscall.AF_INET, - syscall.AF_PACKET, - syscall.AF_INET6, - } { - if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { - break - } - } - if err == nil { - return fd, nil - } - return -1, err -} - -// Create the actual bridge device. This is more backward-compatible than -// netlink.NetworkLinkAdd and works on RHEL 6. 
-func CreateBridge(name string, setMacAddr bool) error { - if len(name) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", name) - } - - s, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return err - } - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return err - } - if setMacAddr { - return SetMacAddress(name, randMacAddr()) - } - return nil -} - -// Delete the actual bridge device. -func DeleteBridge(name string) error { - s, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return err - } - - var ifr ifreqFlags - copy(ifr.IfrnName[:len(ifr.IfrnName)-1], []byte(name)) - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), - syscall.SIOCSIFFLAGS, uintptr(unsafe.Pointer(&ifr))); err != 0 { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), - SIOC_BRDELBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return err - } - return nil -} - -func ifIoctBridge(iface, master *net.Interface, op uintptr) error { - if len(master.Name) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", master.Name) - } - - s, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(s) - - ifr := ifreqIndex{} - copy(ifr.IfrnName[:len(ifr.IfrnName)-1], master.Name) - ifr.IfruIndex = int32(iface.Index) - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), op, uintptr(unsafe.Pointer(&ifr))); err != 0 { - return err - } - - return nil -} - -// Add a slave to a bridge device. This is more backward-compatible than -// netlink.NetworkSetMaster and works on RHEL 6. -func AddToBridge(iface, master *net.Interface) error { - return ifIoctBridge(iface, master, SIOC_BRADDIF) -} - -// Detach a slave from a bridge device. This is more backward-compatible than -// netlink.NetworkSetMaster and works on RHEL 6. 
-func DelFromBridge(iface, master *net.Interface) error { - return ifIoctBridge(iface, master, SIOC_BRDELIF) -} - -func randMacAddr() string { - hw := make(net.HardwareAddr, 6) - for i := 0; i < 6; i++ { - hw[i] = byte(rnd.Intn(255)) - } - hw[0] &^= 0x1 // clear multicast bit - hw[0] |= 0x2 // set local assignment bit (IEEE802) - return hw.String() -} - -func SetMacAddress(name, addr string) error { - if len(name) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", name) - } - - hw, err := net.ParseMAC(addr) - if err != nil { - return err - } - - s, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(s) - - ifr := ifreqHwaddr{} - ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER - copy(ifr.IfrnName[:len(ifr.IfrnName)-1], name) - - for i := 0; i < 6; i++ { - ifr.IfruHwaddr.Data[i] = ifrDataByte(hw[i]) - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 { - return err - } - return nil -} - -func SetHairpinMode(iface *net.Interface, enabled bool) error { - s, err := getNetlinkSocket() - if err != nil { - return err - } - defer s.Close() - req := newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - - msg := newIfInfomsg(syscall.AF_BRIDGE) - msg.Type = syscall.RTM_SETLINK - msg.Flags = syscall.NLM_F_REQUEST - msg.Index = int32(iface.Index) - msg.Change = DEFAULT_CHANGE - req.AddData(msg) - - mode := []byte{0} - if enabled { - mode[0] = byte(1) - } - - br := newRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil) - newRtAttrChild(br, IFLA_BRPORT_MODE, mode) - req.AddData(br) - if err := s.Send(req); err != nil { - return err - } - - return s.HandleAck(req.Seq) -} - -func ChangeName(iface *net.Interface, newName string) error { - if len(newName) >= IFNAMSIZ { - return fmt.Errorf("Interface name %s too long", newName) - } - - fd, err := getIfSocket() - if err != nil { - return err - } - defer syscall.Close(fd) - - data := [IFNAMSIZ * 2]byte{} - // the "-1"s here are very important for ensuring we get proper null - // termination of our new C strings - copy(data[:IFNAMSIZ-1], iface.Name) - copy(data[IFNAMSIZ:IFNAMSIZ*2-1], newName) - - if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&data[0]))); errno != 0 { - return errno - } - - return nil -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_armppc64.go b/vendor/github.com/docker/libcontainer/netlink/netlink_linux_armppc64.go deleted file mode 100644 index 965e0bfb..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_armppc64.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build arm ppc64 ppc64le - -package netlink - -func ifrDataByte(b byte) uint8 { - return uint8(b) -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go b/vendor/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go deleted file mode 100644 index 74462798..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_linux_notarm.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !arm,!ppc64,!ppc64le - -package netlink - -func ifrDataByte(b byte) int8 { - return int8(b) -} diff --git a/vendor/github.com/docker/libcontainer/netlink/netlink_unsupported.go b/vendor/github.com/docker/libcontainer/netlink/netlink_unsupported.go deleted file mode 100644 index 4b11bf8b..00000000 --- a/vendor/github.com/docker/libcontainer/netlink/netlink_unsupported.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build !linux - -package netlink - 
-import ( - "errors" - "net" -) - -var ( - ErrNotImplemented = errors.New("not implemented") -) - -func NetworkGetRoutes() ([]Route, error) { - return nil, ErrNotImplemented -} - -func NetworkLinkAdd(name string, linkType string) error { - return ErrNotImplemented -} - -func NetworkLinkDel(name string) error { - return ErrNotImplemented -} - -func NetworkLinkUp(iface *net.Interface) error { - return ErrNotImplemented -} - -func NetworkLinkAddIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return ErrNotImplemented -} - -func NetworkLinkDelIp(iface *net.Interface, ip net.IP, ipNet *net.IPNet) error { - return ErrNotImplemented -} - -func AddRoute(destination, source, gateway, device string) error { - return ErrNotImplemented -} - -func AddDefaultGw(ip, device string) error { - return ErrNotImplemented -} - -func NetworkSetMTU(iface *net.Interface, mtu int) error { - return ErrNotImplemented -} - -func NetworkSetTxQueueLen(iface *net.Interface, txQueueLen int) error { - return ErrNotImplemented -} - -func NetworkCreateVethPair(name1, name2 string, txQueueLen int) error { - return ErrNotImplemented -} - -func NetworkChangeName(iface *net.Interface, newName string) error { - return ErrNotImplemented -} - -func NetworkSetNsFd(iface *net.Interface, fd int) error { - return ErrNotImplemented -} - -func NetworkSetNsPid(iface *net.Interface, nspid int) error { - return ErrNotImplemented -} - -func NetworkSetMaster(iface, master *net.Interface) error { - return ErrNotImplemented -} - -func NetworkLinkDown(iface *net.Interface) error { - return ErrNotImplemented -} - -func CreateBridge(name string, setMacAddr bool) error { - return ErrNotImplemented -} - -func DeleteBridge(name string) error { - return ErrNotImplemented -} - -func AddToBridge(iface, master *net.Interface) error { - return ErrNotImplemented -} diff --git a/vendor/github.com/docker/libcontainer/update-vendor.sh b/vendor/github.com/docker/libcontainer/update-vendor.sh deleted file mode 100755 index 69b6e50c..00000000 --- a/vendor/github.com/docker/libcontainer/update-vendor.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash -set -e - -cd "$(dirname "$BASH_SOURCE")" - -# Downloads dependencies into vendor/ directory -mkdir -p vendor -cd vendor - -clone() { - vcs=$1 - pkg=$2 - rev=$3 - - pkg_url=https://$pkg - target_dir=src/$pkg - - echo -n "$pkg @ $rev: " - - if [ -d $target_dir ]; then - echo -n 'rm old, ' - rm -fr $target_dir - fi - - echo -n 'clone, ' - case $vcs in - git) - git clone --quiet --no-checkout $pkg_url $target_dir - ( cd $target_dir && git reset --quiet --hard $rev ) - ;; - hg) - hg clone --quiet --updaterev $rev $pkg_url $target_dir - ;; - esac - - echo -n 'rm VCS, ' - ( cd $target_dir && rm -rf .{git,hg} ) - - echo done -} - -# the following lines are in sorted order, FYI -clone git github.com/codegangsta/cli 1.1.0 -clone git github.com/coreos/go-systemd v2 -clone git github.com/godbus/dbus v2 -clone git github.com/Sirupsen/logrus v0.7.3 -clone git github.com/syndtr/gocapability 66ef2aa -clone git github.com/golang/protobuf 655cdfa588ea - -# intentionally not vendoring Docker itself... that'd be a circle :) diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 00000000..05be0f8a --- /dev/null +++ b/vendor/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. 
+
+libtrust is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/vendor/github.com/docker/libcontainer/LICENSE b/vendor/github.com/docker/libtrust/LICENSE
similarity index 100%
rename from vendor/github.com/docker/libcontainer/LICENSE
rename to vendor/github.com/docker/libtrust/LICENSE
diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS
new file mode 100644
index 00000000..9768175f
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md
new file mode 100644
index 00000000..8e7db381
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/README.md
@@ -0,0 +1,18 @@
+# libtrust
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
+
diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go
new file mode 100644
index 00000000..3dcca33c
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"time"
+)
+
+type certTemplateInfo struct {
+	commonName  string
+	domains     []string
+	ipAddresses []net.IP
+	isCA        bool
+	clientAuth  bool
+	serverAuth  bool
+}
+
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+	// Generate a certificate template which is valid from the past week to
+	// 10 years from now. The usage of the certificate depends on the
+	// specified fields in the given certTemplateInfo object.
+ var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
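For orientation, the generator functions above compose into a working TLS identity. The following is an illustrative sketch, not part of the vendored source: it uses `GenerateECP256PrivateKey` (defined later in `ec_key.go` in this same diff), and the hostnames and addresses are invented for the example.

```go
package main

import (
	"crypto/tls"
	"log"
	"net"

	"github.com/docker/libtrust"
)

func main() {
	// Generate a fresh EC P-256 key to identify this server (a real
	// service would load a persisted key instead).
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Self-sign a server certificate; generateCertTemplate above sets the
	// key ID as the CommonName and enables the serverAuth usage.
	cert, err := libtrust.GenerateSelfSignedServerCert(
		key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")},
	)
	if err != nil {
		log.Fatal(err)
	}

	// Pair the DER certificate with the underlying crypto.PrivateKey.
	cfg := &tls.Config{
		Certificates: []tls.Certificate{{
			Certificate: [][]byte{cert.Raw},
			PrivateKey:  key.CryptoPrivateKey(),
		}},
	}
	_ = cfg // hand cfg to tls.Listen or an http.Server as usual
}
```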
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go new file mode 100644 index 00000000..ec5d2159 --- /dev/null +++ b/vendor/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go new file mode 100644 index 00000000..00bbe4b3 --- /dev/null +++ b/vendor/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. +type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. 
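A short usage sketch for the two loaders above (the bundle path is illustrative, not something this diff ships): `LoadCertificatePool` reads every `CERTIFICATE` block from a PEM file and returns a ready-to-use CA pool.

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Load every CERTIFICATE block from a PEM bundle into a CA pool.
	pool, err := libtrust.LoadCertificatePool("/etc/docker/ca-bundle.pem")
	if err != nil {
		log.Fatal(err)
	}

	// Require and verify client certificates against that pool.
	cfg := &tls.Config{
		ClientCAs:  pool,
		ClientAuth: tls.RequireAndVerifyClientCert,
	}
	_ = cfg
}
```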
+func (k *ecPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+	return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// For EC keys there is only one supported signature algorithm depending
+	// on the curve parameters.
+	if k.signatureAlgorithm.HeaderParam() != alg {
+		return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+	}
+
+	// signature is the concatenation of (r, s), base64Url encoded.
+	sigLength := len(signature)
+	expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+	if sigLength != expectedOctetLength {
+		return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+	}
+
+	rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+	r := new(big.Int).SetBytes(rBytes)
+	s := new(big.Int).SetBytes(sBytes)
+
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err := io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+		return errors.New("invalid signature")
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+	return k.PublicKey
+}
+
+func (k *ecPublicKey) toMap() map[string]interface{} {
+	jwk := make(map[string]interface{})
+	for k, v := range k.extended {
+		jwk[k] = v
+	}
+	jwk["kty"] = k.KeyType()
+	jwk["kid"] = k.KeyID()
+	jwk["crv"] = k.CurveName()
+
+	xBytes := k.X.Bytes()
+	yBytes := k.Y.Bytes()
+	octetLength := (k.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output so that x, y are each
+	// *octetLength* bytes long.
+	xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+	yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+	xBuf = append(xBuf, xBytes...)
+	yBuf = append(yBuf, yBytes...)
+
+	jwk["x"] = joseBase64UrlEncode(xBuf)
+	jwk["y"] = joseBase64UrlEncode(yBuf)
+
+	return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
+	}
+	k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *ecPrivateKey) PublicKey() PublicKey { + return &k.ecPublicKey +} + +func (k *ecPrivateKey) String() string { + return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the elliptic curve private key. If the specified hashing algorithm is +// supported by this key, that hash function is used to generate the signature +// otherwise the the default hashing algorithm for this key is used. Returns +// the signature and the name of the JWK signature algorithm used, e.g., +// "ES256", "ES384", "ES512". +func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. 
+	// The given hashId is only a suggestion, and since EC keys only support
+	// one signature/hash algorithm given the curve name, we disregard it for
+	// the elliptic curve JWK signature implementation.
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+	rBytes, sBytes := r.Bytes(), s.Bytes()
+	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output
+	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+	sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+	rBuf = append(rBuf, rBytes...)
+	sBuf = append(sBuf, sBytes...)
+
+	signature = append(rBuf, sBuf...)
+	alg = k.signatureAlgorithm.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+	jwk := k.ecPublicKey.toMap()
+
+	dBytes := k.D.Bytes()
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := k.ecPublicKey.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	// Create a buffer with the necessary zero-padding.
+	dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+	dBuf = append(dBuf, dBytes...)
+
+	jwk["d"] = joseBase64UrlEncode(dBuf)
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC1 format.
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded SEC1 format: %s", err)
+	}
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+	dB64Url, err := stringFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+	}
+
+	// JWK key type (kty) has already been determined to be "EC".
+	// Need to extract the public key information, then extract the private
+	// key value 'd'.
+	publicKey, err := ecPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+	}
+
+	key := &ecPrivateKey{
+		ecPublicKey: *publicKey,
+		PrivateKey: &ecdsa.PrivateKey{
+			PublicKey: *publicKey.PublicKey,
+			D:         d,
+		},
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+	k = new(ecPrivateKey)
+	k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+	k.extended = make(map[string]interface{})
+
+	return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P256())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+	}
+
+	k.curveName = "P-256"
+	k.signatureAlgorithm = es256
+
+	return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P384())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+	}
+
+	k.curveName = "P-384"
+	k.signatureAlgorithm = es384
+
+	return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P521())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+	}
+
+	k.curveName = "P-521"
+	k.signatureAlgorithm = es512
+
+	return k, nil
+}
diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go
new file mode 100644
index 00000000..5b2b4fca
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+	"path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
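Taken together, the generation functions and the `Sign`/`Verify` implementations above give a simple round trip. A minimal sketch (not part of the vendored source); as the comment inside `Sign` explains, the hash argument is only advisory for EC keys, since the curve pins the algorithm:

```go
package main

import (
	"bytes"
	"crypto"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	payload := []byte("hello, libtrust")

	// P-256 always signs as ES256, regardless of the suggested hash.
	sig, alg, err := key.Sign(bytes.NewReader(payload), crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signed with", alg)

	// Verification uses the JWS algorithm name returned by Sign.
	if err := key.PublicKey().Verify(bytes.NewReader(payload), alg, sig); err != nil {
		log.Fatal("signature did not verify: ", err)
	}
	fmt.Println("signature OK")
}
```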
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+	filtered := make([]PublicKey, 0, len(keys))
+
+	for _, pubKey := range keys {
+		var hosts []string
+		switch v := pubKey.GetExtendedField("hosts").(type) {
+		case []string:
+			hosts = v
+		case []interface{}:
+			for _, value := range v {
+				h, ok := value.(string)
+				if !ok {
+					continue
+				}
+				hosts = append(hosts, h)
+			}
+		}
+
+		if len(hosts) == 0 {
+			if includeEmpty {
+				filtered = append(filtered, pubKey)
+			}
+			continue
+		}
+
+		// Check if any hosts match pattern
+		for _, hostPattern := range hosts {
+			match, err := filepath.Match(hostPattern, host)
+			if err != nil {
+				return nil, err
+			}
+
+			if match {
+				filtered = append(filtered, pubKey)
+				continue
+			}
+		}
+	}
+
+	return filtered, nil
+}
diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go
new file mode 100644
index 00000000..a2df787d
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+	"crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+	"fmt"
+)
+
+type signatureAlgorithm struct {
+	algHeaderParam string
+	hashID         crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+	return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+	return h.hashID
+}
+
+var (
+	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+	switch {
+	case alg == "RS256":
+		return rs256, nil
+	case alg == "RS384":
+		return rs384, nil
+	case alg == "RS512":
+		return rs512, nil
+	default:
+		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+	}
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+	switch {
+	case hashID == crypto.SHA512:
+		return rs512
+	case hashID == crypto.SHA384:
+		return rs384
+	case hashID == crypto.SHA256:
+		fallthrough
+	default:
+		return rs256
+	}
+}
diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 00000000..cb2ca9a7
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+	"unicode"
+)
+
+var (
+	// ErrInvalidSignContent is used when the content to be signed is invalid.
+	ErrInvalidSignContent = errors.New("invalid sign content")
+
+	// ErrInvalidJSONContent is used when invalid json is encountered.
+	ErrInvalidJSONContent = errors.New("invalid json content")
+
+	// ErrMissingSignatureKey is used when the specified signature key
+	// does not exist in the JSON content.
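A hedged usage sketch for `FilterByHosts` (hostnames invented for the example): keys advertise glob-style patterns through the `"hosts"` extended field, and `includeEmpty` controls whether unrestricted keys pass the filter.

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	k1, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	k2, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Tag the first key with a glob-style hosts pattern; leave the second
	// key without any hosts restriction.
	pub1 := k1.PublicKey()
	pub1.AddExtendedField("hosts", []string{"*.example.com"})
	pub2 := k2.PublicKey()

	// includeEmpty=true also keeps keys carrying no hosts field, so both
	// keys survive the filter here.
	matched, err := libtrust.FilterByHosts(
		[]libtrust.PublicKey{pub1, pub2}, "registry.example.com", true,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(matched), "key(s) trusted for registry.example.com")
}
```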
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
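`Sign` above and `JWS` (defined later in this file) are typically used back to back; a minimal signing sketch under those assumptions, with an invented payload:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Wrap an arbitrary JSON document, sign it, and emit the JWS JSON
	// serialization (payload plus a "signatures" array).
	js, err := libtrust.NewJSONSignature([]byte(`{"name": "example", "tag": "latest"}`))
	if err != nil {
		log.Fatal(err)
	}
	if err := js.Sign(key); err != nil {
		log.Fatal(err)
	}

	jws, err := js.JWS()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(jws))
}
```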
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. +func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
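On the consuming side, `ParseJWS` pairs with `Verify`; a sketch (package and helper name are illustrative, and certificate chains are deliberately not checked here):

```go
package trustutil

import (
	"fmt"

	"github.com/docker/libtrust"
)

// verifySigners parses a serialized JWS (e.g. the output of JWS() above)
// and reports the key IDs behind every valid signature.
func verifySigners(jwsBytes []byte) error {
	js, err := libtrust.ParseJWS(jwsBytes)
	if err != nil {
		return err
	}
	// Verify checks each signature against its embedded JWK or x509 leaf;
	// chains are not validated (use VerifyChains with a CA pool for that).
	keys, err := js.Verify()
	if err != nil {
		return err
	}
	for _, key := range keys {
		fmt.Println("valid signature from", key.KeyID())
	}
	return nil
}
```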
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("no signatures")
+	}
+	payload, err := joseBase64UrlDecode(js.payload)
+	if err != nil {
+		return nil, err
+	}
+	payload = payload[:js.formatLength]
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var marshalled []byte
+	var marshallErr error
+	if js.indent != "" {
+		marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+	} else {
+		marshalled, marshallErr = json.Marshal(js.signatures)
+	}
+	if marshallErr != nil {
+		return nil, marshallErr
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+	buf.Write(payload)
+	buf.WriteByte(',')
+	if js.indent != "" {
+		buf.WriteByte('\n')
+		buf.WriteString(js.indent)
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\": ")
+		buf.Write(marshalled)
+		buf.WriteByte('\n')
+	} else {
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\":")
+		buf.Write(marshalled)
+	}
+	buf.WriteByte('}')
+
+	return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var sb [][]byte
+	for _, jsig := range js.signatures {
+		p, err := json.Marshal(jsig)
+		if err != nil {
+			return nil, err
+		}
+
+		sb = append(sb, p)
+	}
+
+	return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+	merged := js.signatures
+	for _, other := range others {
+		if js.payload != other.payload {
+			return fmt.Errorf("payloads differ from merge target")
+		}
+		merged = append(merged, other.signatures...)
+	}
+
+	js.signatures = merged
+	return nil
+}
diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go
new file mode 100644
index 00000000..73642db2
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+	// KeyType returns the key type for this key. For elliptic curve keys,
+	// this value should be "EC". For RSA keys, this value should be "RSA".
+	KeyType() string
+	// KeyID returns a distinct identifier which is unique to this Public Key.
+	// The format generated by this library is a base32 encoding of a 240 bit
+	// hash of the public key data divided into 12 groups like so:
+	// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+	// Public Key. The alg parameter should identify the digital signature
+	// algorithm which was used to produce the signature and should be
+	// supported by this public key. Returns a nil error if the signature
+	// is valid.
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
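`UnmarshalPublicKeyJWK` pairs with the `MarshalJSON` implementations earlier in this diff; a round-trip sketch (package and helper name illustrative) showing that the embedded `kid` survives serialization and is re-checked on parse:

```go
package trustutil

import (
	"encoding/json"
	"fmt"

	"github.com/docker/libtrust"
)

// jwkRoundTrip serializes a public key in its JWK form and parses it back.
func jwkRoundTrip() error {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return err
	}

	jwkJSON, err := json.Marshal(key.PublicKey()) // uses the JWK MarshalJSON
	if err != nil {
		return err
	}

	pub, err := libtrust.UnmarshalPublicKeyJWK(jwkJSON)
	if err != nil {
		return err // e.g. kid mismatch or unsupported kty
	}
	fmt.Println("round-tripped key:", pub.KeyID())
	return nil
}
```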
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go new file mode 100644 index 00000000..c526de54 --- /dev/null +++ b/vendor/github.com/docker/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
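`LoadKeyFile`, `ErrKeyFileDoesNotExist`, and `SaveKey` (defined just below) combine into the usual load-or-generate pattern; a sketch with an illustrative path, where the `.json` suffix selects the JWK encoding:

```go
package main

import (
	"log"

	"github.com/docker/libtrust"
)

// loadOrCreateKey reuses the key on disk if present, otherwise generates
// one and persists it.
func loadOrCreateKey(path string) (libtrust.PrivateKey, error) {
	key, err := libtrust.LoadKeyFile(path)
	if err == nil {
		return key, nil
	}
	if err != libtrust.ErrKeyFileDoesNotExist {
		return nil, err
	}

	key, err = libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	// SaveKey picks JWK or PEM encoding from the file extension.
	if err := libtrust.SaveKey(path, key); err != nil {
		return nil, err
	}
	return key, nil
}

func main() {
	key, err := loadOrCreateKey("/etc/docker/key.json")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("key ID:", key.KeyID())
}
```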
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+	return []json.RawMessage{}, nil +	} + + keySet := jwkSet{} + + err := json.Unmarshal(data, &keySet) + if err != nil { + return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) + } + + return keySet.Keys, nil +} + +func loadJSONKeySetFile(filename string) ([]PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyJWKSet(contents) +} + +func loadPEMKeySetFile(filename string) ([]PublicKey, error) { + data, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyPEMBundle(data) +} + +// AddKeySetFile adds a key to a key set file. +func AddKeySetFile(filename string, key PublicKey) error { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return addKeySetJSONFile(filename, key) + } + + // Must be a PEM format file + return addKeySetPEMFile(filename, key) +} + +func addKeySetJSONFile(filename string, key PublicKey) error { + encodedKey, err := json.Marshal(key) + if err != nil { + return fmt.Errorf("unable to encode trusted client key: %s", err) + } + + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return err + } + + rawEntries, err := loadJSONKeySetRaw(contents) + if err != nil { + return err + } + + rawEntries = append(rawEntries, json.RawMessage(encodedKey)) + entriesWrapper := jwkSet{Keys: rawEntries} + + encodedEntries, err := json.MarshalIndent(entriesWrapper, "", "  ") + if err != nil { + return fmt.Errorf("unable to encode trusted client keys: %s", err) + } + + err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) + } + + return nil +} + +func addKeySetPEMFile(filename string, key PublicKey) error { + // Encode to PEM, open file for appending, write PEM. + file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) + } + defer file.Close() + + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode trusted key: %s", err) + } + + _, err = file.Write(pem.EncodeToMemory(pemBlock)) + if err != nil { + return fmt.Errorf("unable to write trusted keys file: %s", err) + } + + return nil +} diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go new file mode 100644 index 00000000..9a98ae35 --- /dev/null +++ b/vendor/github.com/docker/libtrust/key_manager.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "sync" +) + +// ClientKeyManager manages client keys on the filesystem +type ClientKeyManager struct { + key        PrivateKey + clientFile string + clientDir  string + + clientLock sync.RWMutex + clients    []PublicKey + + configLock sync.Mutex + configs    []*tls.Config +} + +// NewClientKeyManager loads a new manager from a set of key files, +// managed by the given private key.
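Reviewer's note: an editorial sketch of the file helpers defined above (not part of the diff; paths are illustrative). The filename extension picks the encoding: `.json`/`.jwk` means JWK, anything else means PEM.

```go
package main

import (
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	// Written as JWK because of the .json suffix; file mode is 0600.
	if err := libtrust.SaveKey("/tmp/trust-key.json", key); err != nil {
		log.Fatal(err)
	}
	// Append the public half to a PEM bundle of trusted keys...
	if err := libtrust.AddKeySetFile("/tmp/trusted-keys.pem", key.PublicKey()); err != nil {
		log.Fatal(err)
	}
	// ...and load the whole bundle back.
	trusted, err := libtrust.LoadKeySetFile("/tmp/trusted-keys.pem")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d trusted key(s)", len(trusted))
}
```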
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key:        trustKey, + clientFile: clientFile, + clientDir:  clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration with the manager +// such that any changes to the keys may be reflected in +// the tls client CA pool. +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + // Release the read lock before any early return so it is not leaked on error. + c.clientLock.RUnlock() + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the specified domain. +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey:  trustKey.CryptoPrivateKey(), + Leaf:        x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication. +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA?
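Reviewer's note: `NewCertAuthTLSConfig`, whose body continues just below, covers the plain mutual-TLS case. An editorial sketch of a call site (paths are illustrative, not part of the diff):

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Ordinary PEM certificates instead of libtrust identities; passing a
	// CA path enables mandatory client certificate verification.
	tlsConfig, err := libtrust.NewCertAuthTLSConfig(
		"/tmp/ca.pem", "/tmp/server-cert.pem", "/tmp/server-key.pem")
	if err != nil {
		log.Fatal(err)
	}
	ln, err := tls.Listen("tcp", "127.0.0.1:2376", tlsConfig)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
}
```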
+ if caPath != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tlsConfig, nil +} + +func newTLSConfig() *tls.Config { + return &tls.Config{ + NextProtos: []string{"http/1.1"}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } +} + +// parseAddr parses an address into an array of IPs and domains +func parseAddr(addr string) ([]net.IP, []string, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, nil, err + } + var domains []string + var ips []net.IP + ip := net.ParseIP(host) + if ip != nil { + ips = []net.IP{ip} + } else { + domains = []string{host} + } + return ips, domains, nil +} diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go new file mode 100644 index 00000000..dac4cacf --- /dev/null +++ b/vendor/github.com/docker/libtrust/rsa_key.go @@ -0,0 +1,427 @@ +package libtrust + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * RSA DSA PUBLIC KEY + */ + +// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. +type rsaPublicKey struct { + *rsa.PublicKey + extended map[string]interface{} +} + +func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { + return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} +} + +// KeyType returns the JWK key type for RSA keys, i.e., "RSA". +func (k *rsaPublicKey) KeyType() string { + return "RSA" +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given date, return non-nil error if valid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. 
The type +// for this implementation is *rsa.PublicKey. +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key.
If the specified hashing algorithm is supported by +// this key, that hash function is used to generate the signature; otherwise +// the default hashing algorithm for this key is used. Returns the signature +// and the name of the JWA signature algorithm used, e.g., "RS256", "RS384", +// "RS512". +func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) + hasher := sigAlg.HashID().New() + + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + + alg = sigAlg.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PrivateKey for use with other standard library operations. The type +// for this implementation is *rsa.PrivateKey. +func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *rsaPrivateKey) toMap() map[string]interface{} { + k.Precompute() // Make sure the precomputed values are stored. + jwk := k.rsaPublicKey.toMap() + + jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) + jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) + jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) + jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) + jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) + jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) + + otherPrimes := k.Primes[2:] + + if len(otherPrimes) > 0 { + otherPrimesInfo := make([]interface{}, len(otherPrimes)) + for i, r := range otherPrimes { + otherPrimeInfo := make(map[string]string, 3) + otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) + crtVal := k.Precomputed.CRTValues[i] + otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) + otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) + otherPrimesInfo[i] = otherPrimeInfo + } + jwk["oth"] = otherPrimesInfo + } + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKCS1 format. +func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) +} + +func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { + // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that + // only the private key exponent 'd' is REQUIRED, the others are just for + // signature/decryption optimizations and SHOULD be included when the JWK + // is produced. We MAY choose to accept a JWK which only includes 'd', but + // we choose not to accept a JWK without the extra fields. Only the 'oth' + // field will be optional (for multi-prime keys).
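Reviewer's note: an editorial sketch of a sign/verify round trip with the methods above (not part of the diff). The blank `crypto/sha256` import registers the hash that `Sign` and `Verify` look up by ID.

```go
package main

import (
	"bytes"
	"crypto"
	_ "crypto/sha256" // registers SHA-256 with the crypto package
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	payload := []byte("manifest bytes")
	sig, alg, err := key.Sign(bytes.NewReader(payload), crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signed with:", alg) // "RS256"
	// Verification needs only the public half; a nil error means valid.
	if err := key.PublicKey().Verify(bytes.NewReader(payload), alg, sig); err != nil {
		log.Fatal(err)
	}
}
```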
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) + } + firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + var oth interface{} + if _, ok := jwk["oth"]; ok { + oth = jwk["oth"] + delete(jwk, "oth") + } + + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract the public key information, then extract the private + // key values. + publicKey, err := rsaPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + privateKey := &rsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D:         privateExponent, + Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor}, + Precomputed: rsa.PrecomputedValues{ + Dp:   firstFactorCRT, + Dq:   secondFactorCRT, + Qinv: crtCoeff, + }, + } + + if oth != nil { + // Should be an array of more JSON objects. + otherPrimesInfo, ok := oth.([]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") + } + numOtherPrimeFactors := len(otherPrimesInfo) + if numOtherPrimeFactors == 0 { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty") + } + otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) + productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) + crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) + + for i, val := range otherPrimesInfo { + otherPrimeinfo, ok := val.(map[string]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") + } + + otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + // Take a pointer into the slice so these assignments update the stored value. + crtValue := &crtValues[i] + crtValue.Exp = otherFactorCRT + crtValue.Coeff = otherCrtCoeff + crtValue.R = productOfPrimes + otherPrimeFactors[i] = otherPrimeFactor + productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) + } + + privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) + privateKey.Precomputed.CRTValues = crtValues + } + + key := &rsaPrivateKey{ + rsaPublicKey: *publicKey, + PrivateKey:   privateKey, + } + + return key, nil +} + +/* + * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go new file mode 100644 index 00000000..d88176cc --- /dev/null +++ b/vendor/github.com/docker/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
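Reviewer's note: an editorial sketch contrasting explicit key generation with the `LoadOrCreateTrustKey` bootstrap defined at the top of `util.go` (not part of the diff; the path is illustrative).

```go
package main

import (
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Explicit generation at a chosen strength.
	rsaKey, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(rsaKey) // e.g. "RSA Private Key <ABCD:EFGH:...>"

	// The first call generates an EC P-256 key, saves it, and writes the
	// public half next to it as "public-key.json"; later calls just load it.
	trustKey, err := libtrust.LoadOrCreateTrustKey("/tmp/docker/key.json")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("trust key:", trustKey.KeyID())
}
```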
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url: %s", err) + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey:  trustKey.CryptoPrivateKey(), + Leaf:        x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots:         tlsConfig.RootCAs, + CurrentTime:   time.Now(), + DNSName:       tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification.
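Reviewer's note: a worked example of the jose padding rule (editorial sketch; these helpers are unexported, so it would have to live in a `_test.go` file inside the package).

```go
package libtrust

import "fmt"

func Example_joseBase64Url() {
	// Two input bytes encode to three base64 characters plus one '=' pad;
	// the jose form drops the padding.
	s := joseBase64UrlEncode([]byte{0xfb, 0xef})
	fmt.Println(s)

	// Decoding re-pads based on len(s) % 4 (here 3, so one '=' is added)
	// and then uses the standard URL-safe decoder.
	b, err := joseBase64UrlDecode(s)
	fmt.Printf("%x %v\n", b, err)
	// Output:
	// --8
	// fbef <nil>
}
```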
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
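Reviewer's note: a worked example of the minimal-octet rule for the exponent that the function starting above implements (editorial sketch; again package-internal, e.g. in a `_test.go` file).

```go
package libtrust

import "fmt"

func Example_rsaPublicExponent() {
	// 65537 is 0x00010001: the leading zero octet is dropped, leaving the
	// minimal three bytes 0x01 0x00 0x01, i.e. the familiar JWK "e" value.
	eBytes := serializeRSAPublicExponentParam(65537)
	fmt.Println(joseBase64UrlEncode(eBytes))

	// The parser pads back to four bytes before reading a big-endian uint32.
	e, _ := parseRSAPublicExponentParam("AQAB")
	fmt.Println(e)
	// Output:
	// AQAB
	// 65537
}
```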
+	// E is supposed to be 65537 for performance and security reasons + // and is what golang's rsa package generates, but it might be + // different if imported from some other generator. + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(e)) + var i int + for i = 0; i < len(buf)-1; i++ { // stop before the last byte so at least one octet remains + if buf[i] != 0 { + break + } + } + return buf[i:] +} + +func parseRSAPublicExponentParam(eB64Url string) (int, error) { + eBytes, err := joseBase64UrlDecode(eB64Url) + if err != nil { + return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + // Only the minimum number of bytes were used to represent E, but + // binary.BigEndian.Uint32 expects at least 4 bytes, so we need + // to add zero padding if necessary. + byteLen := len(eBytes) + if byteLen > 4 { + return 0, fmt.Errorf("invalid public exponent: expected at most 4 octets, got %d", byteLen) + } + buf := make([]byte, 4-byteLen, 4) + eBytes = append(buf, eBytes...) + + return int(binary.BigEndian.Uint32(eBytes)), nil +} + +func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { + b64Url, err := stringFromMap(m, key) + if err != nil { + return nil, err + } + + paramBytes, err := joseBase64UrlDecode(b64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(paramBytes), nil +} + +func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { + pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} + for k, v := range headers { + switch val := v.(type) { + case string: + pemBlock.Headers[k] = val + case []string: + if k == "hosts" { + pemBlock.Headers[k] = strings.Join(val, ",") + } else { + // Silently skip non-encodable []string values other than "hosts". + } + default: + // Silently skip values of non-encodable types. + } + } + + return pemBlock, nil +} + +func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { + cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) + } + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + return nil, err + } + + addPEMHeadersToKey(pemBlock, pubKey) + + return pubKey, nil +} + +func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { + for key, value := range pemBlock.Headers { + var safeVal interface{} + if key == "hosts" { + safeVal = strings.Split(value, ",") + } else { + safeVal = value + } + pubKey.AddExtendedField(key, safeVal) + } +} diff --git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md new file mode 100644 index 00000000..c88f9b2b --- /dev/null +++ b/vendor/github.com/godbus/dbus/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why.
The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +<subsystem>: <what changed> +<BLANK LINE> +<why this change was made> +<BLANK LINE>