Mirror of https://github.com/rancher/os.git (synced 2025-07-13 14:44:03 +00:00)

Commit ea2d8b30e3 (parent f3afb076f7): Cleanup ./vendor dir

Deleted files:
vendor/github.com/Sirupsen/logrus/examples/basic/basic.go | 50 lines deleted (generated, vendored): logrus basic-usage example logging structured fields from Debug through Panic
vendor/github.com/Sirupsen/logrus/examples/hook/hook.go | 30 lines deleted (generated, vendored): logrus example wiring the Airbrake hook into a logger
vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go | 56 lines deleted (generated, vendored): LogstashFormatter, which renders logrus entries as Logstash-style JSON
vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go | 52 lines deleted (generated, vendored): tests for LogstashFormatter
vendor/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go | 54 lines deleted (generated, vendored): airbrakeHook, forwarding Error/Fatal/Panic entries to an Airbrake-compatible API
vendor/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go | 133 lines deleted (generated, vendored): Airbrake hook tests against an httptest endpoint
vendor/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go | 68 lines deleted (generated, vendored): bugsnagHook, forwarding Error/Fatal/Panic entries to Bugsnag
vendor/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go | 64 lines deleted (generated, vendored): Bugsnag hook test against an httptest endpoint
vendor/github.com/Sirupsen/logrus/hooks/papertrail/README.md | 28 lines deleted (generated, vendored): usage notes for the Papertrail hook
vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go | 55 lines deleted (generated, vendored): PapertrailHook, sending formatted log lines to Papertrail over UDP
vendor/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go | 26 lines deleted (generated, vendored): UDP delivery test for the Papertrail hook
vendor/github.com/Sirupsen/logrus/hooks/sentry/README.md | 111 lines deleted (generated, vendored): usage notes for the Sentry hook (constructors, special fields, timeout)
vendor/github.com/Sirupsen/logrus/hooks/sentry/sentry.go | 137 lines deleted (generated, vendored): SentryHook, delivering entries to a Sentry server via raven-go
vendor/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go | 154 lines deleted (generated, vendored): Sentry hook tests (special fields, client reuse, tags)
vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md | 20 lines deleted (generated, vendored): usage notes for the syslog hook
vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go | 59 lines deleted (generated, vendored): SyslogHook, mapping logrus levels onto syslog severities
vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go | 26 lines deleted (generated, vendored): local syslog hook test
vendor/github.com/codegangsta/cli/autocomplete/bash_autocomplete | 14 lines deleted (generated, vendored): bash completion script for codegangsta/cli programs
vendor/github.com/codegangsta/cli/autocomplete/zsh_autocomplete | 5 lines deleted (generated, vendored): zsh shim that sources the bash completion script
vendor/github.com/coreos/coreos-cloudinit/config/validate/context.go | 52 lines deleted (generated, vendored): validate.context, a line cursor over cloud-config input
vendor/github.com/coreos/coreos-cloudinit/config/validate/context_test.go | 131 lines deleted (generated, vendored): tests for NewContext and Increment
vendor/github.com/coreos/coreos-cloudinit/config/validate/node.go | 157 lines deleted (generated, vendored): reflection-built node tree tracking YAML keys and line numbers
vendor/github.com/coreos/coreos-cloudinit/config/validate/node_test.go | 284 lines deleted (generated, vendored): tests for node construction, Child, HumanType, and key/element lookup
88
vendor/github.com/coreos/coreos-cloudinit/config/validate/report.go
generated
vendored
88
vendor/github.com/coreos/coreos-cloudinit/config/validate/report.go
generated
vendored
@ -1,88 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Report represents the list of entries resulting from validation.
|
||||
type Report struct {
|
||||
entries []Entry
|
||||
}
|
||||
|
||||
// Error adds an error entry to the report.
|
||||
func (r *Report) Error(line int, message string) {
|
||||
r.entries = append(r.entries, Entry{entryError, message, line})
|
||||
}
|
||||
|
||||
// Warning adds a warning entry to the report.
|
||||
func (r *Report) Warning(line int, message string) {
|
||||
r.entries = append(r.entries, Entry{entryWarning, message, line})
|
||||
}
|
||||
|
||||
// Info adds an info entry to the report.
|
||||
func (r *Report) Info(line int, message string) {
|
||||
r.entries = append(r.entries, Entry{entryInfo, message, line})
|
||||
}
|
||||
|
||||
// Entries returns the list of entries in the report.
|
||||
func (r *Report) Entries() []Entry {
|
||||
return r.entries
|
||||
}
|
||||
|
||||
// Entry represents a single generic item in the report.
|
||||
type Entry struct {
|
||||
kind entryKind
|
||||
message string
|
||||
line int
|
||||
}
|
||||
|
||||
// String returns a human-readable representation of the entry.
|
||||
func (e Entry) String() string {
|
||||
return fmt.Sprintf("line %d: %s: %s", e.line, e.kind, e.message)
|
||||
}
|
||||
|
||||
// MarshalJSON satisfies the json.Marshaler interface, returning the entry
|
||||
// encoded as a JSON object.
|
||||
func (e Entry) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(map[string]interface{}{
|
||||
"kind": e.kind.String(),
|
||||
"message": e.message,
|
||||
"line": e.line,
|
||||
})
|
||||
}
|
||||
|
||||
type entryKind int
|
||||
|
||||
const (
|
||||
entryError entryKind = iota
|
||||
entryWarning
|
||||
entryInfo
|
||||
)
|
||||
|
||||
func (k entryKind) String() string {
|
||||
switch k {
|
||||
case entryError:
|
||||
return "error"
|
||||
case entryWarning:
|
||||
return "warning"
|
||||
case entryInfo:
|
||||
return "info"
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %d", k))
|
||||
}
|
||||
}
|
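A minimal sketch of how the Report type above was driven from calling code, assuming the package is imported from its vendored path github.com/coreos/coreos-cloudinit/config/validate; the messages and line numbers are made up for illustration:

package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/config/validate"
)

func main() {
	// Accumulate findings; Entry's String method renders "line N: kind: message".
	var r validate.Report
	r.Warning(1, "write_files doesn't belong under coreos")
	r.Error(3, `content cannot be decoded as "base64"`)
	for _, e := range r.Entries() {
		fmt.Println(e)
	}
}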
96
vendor/github.com/coreos/coreos-cloudinit/config/validate/report_test.go
generated
vendored
@ -1,96 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEntry(t *testing.T) {
|
||||
tests := []struct {
|
||||
entry Entry
|
||||
|
||||
str string
|
||||
json []byte
|
||||
}{
|
||||
{
|
||||
Entry{entryInfo, "test info", 1},
|
||||
"line 1: info: test info",
|
||||
[]byte(`{"kind":"info","line":1,"message":"test info"}`),
|
||||
},
|
||||
{
|
||||
Entry{entryWarning, "test warning", 1},
|
||||
"line 1: warning: test warning",
|
||||
[]byte(`{"kind":"warning","line":1,"message":"test warning"}`),
|
||||
},
|
||||
{
|
||||
Entry{entryError, "test error", 2},
|
||||
"line 2: error: test error",
|
||||
[]byte(`{"kind":"error","line":2,"message":"test error"}`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
if str := tt.entry.String(); tt.str != str {
|
||||
t.Errorf("bad string (%q): want %q, got %q", tt.entry, tt.str, str)
|
||||
}
|
||||
json, err := tt.entry.MarshalJSON()
|
||||
if err != nil {
|
||||
t.Errorf("bad error (%q): want %v, got %q", tt.entry, nil, err)
|
||||
}
|
||||
if !bytes.Equal(tt.json, json) {
|
||||
t.Errorf("bad JSON (%q): want %q, got %q", tt.entry, tt.json, json)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReport(t *testing.T) {
|
||||
type reportFunc struct {
|
||||
fn func(*Report, int, string)
|
||||
line int
|
||||
message string
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
fs []reportFunc
|
||||
|
||||
es []Entry
|
||||
}{
|
||||
{
|
||||
[]reportFunc{
|
||||
{(*Report).Warning, 1, "test warning 1"},
|
||||
{(*Report).Error, 2, "test error 2"},
|
||||
{(*Report).Info, 10, "test info 10"},
|
||||
},
|
||||
[]Entry{
|
||||
Entry{entryWarning, "test warning 1", 1},
|
||||
Entry{entryError, "test error 2", 2},
|
||||
Entry{entryInfo, "test info 10", 10},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
r := Report{}
|
||||
for _, f := range tt.fs {
|
||||
f.fn(&r, f.line, f.message)
|
||||
}
|
||||
if es := r.Entries(); !reflect.DeepEqual(tt.es, es) {
|
||||
t.Errorf("bad entries (%v): want %#v, got %#v", tt.fs, tt.es, es)
|
||||
}
|
||||
}
|
||||
}
|
177
vendor/github.com/coreos/coreos-cloudinit/config/validate/rules.go
generated
vendored
@ -1,177 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/config"
|
||||
)
|
||||
|
||||
type rule func(config node, report *Report)
|
||||
|
||||
// Rules contains all of the validation rules.
|
||||
var Rules []rule = []rule{
|
||||
checkDiscoveryUrl,
|
||||
checkEncoding,
|
||||
checkStructure,
|
||||
checkValidity,
|
||||
checkWriteFiles,
|
||||
checkWriteFilesUnderCoreos,
|
||||
}
|
||||
|
||||
// checkDiscoveryUrl verifies that the string is a valid url.
|
||||
func checkDiscoveryUrl(cfg node, report *Report) {
|
||||
c := cfg.Child("coreos").Child("etcd").Child("discovery")
|
||||
if !c.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := url.ParseRequestURI(c.String()); err != nil {
|
||||
report.Warning(c.line, "discovery URL is not valid")
|
||||
}
|
||||
}
|
||||
|
||||
// checkEncoding validates that, for each file under 'write_files', the
|
||||
// content can be decoded given the specified encoding.
|
||||
func checkEncoding(cfg node, report *Report) {
|
||||
for _, f := range cfg.Child("write_files").children {
|
||||
e := f.Child("encoding")
|
||||
if !e.IsValid() {
|
||||
continue
|
||||
}
|
||||
|
||||
c := f.Child("content")
|
||||
if _, err := config.DecodeContent(c.String(), e.String()); err != nil {
|
||||
report.Error(c.line, fmt.Sprintf("content cannot be decoded as %q", e.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkStructure compares the provided config to the empty config.CloudConfig
|
||||
// structure. Each node is checked to make sure that it exists in the known
|
||||
// structure and that its type is compatible.
|
||||
func checkStructure(cfg node, report *Report) {
|
||||
g := NewNode(config.CloudConfig{}, NewContext([]byte{}))
|
||||
checkNodeStructure(cfg, g, report)
|
||||
}
|
||||
|
||||
func checkNodeStructure(n, g node, r *Report) {
|
||||
if !isCompatible(n.Kind(), g.Kind()) {
|
||||
r.Warning(n.line, fmt.Sprintf("incorrect type for %q (want %s)", n.name, g.HumanType()))
|
||||
return
|
||||
}
|
||||
|
||||
switch g.Kind() {
|
||||
case reflect.Struct:
|
||||
for _, cn := range n.children {
|
||||
if cg := g.Child(cn.name); cg.IsValid() {
|
||||
checkNodeStructure(cn, cg, r)
|
||||
} else {
|
||||
r.Warning(cn.line, fmt.Sprintf("unrecognized key %q", cn.name))
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
for _, cn := range n.children {
|
||||
var cg node
|
||||
c := g.Type().Elem()
|
||||
toNode(reflect.New(c).Elem().Interface(), context{}, &cg)
|
||||
checkNodeStructure(cn, cg, r)
|
||||
}
|
||||
case reflect.String, reflect.Int, reflect.Float64, reflect.Bool:
|
||||
default:
|
||||
panic(fmt.Sprintf("checkNodeStructure(): unhandled kind %s", g.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// isCompatible determines if the type of kind n can be converted to the type
|
||||
// of kind g in the context of YAML. This is not an exhaustive list, but it's
|
||||
// enough for the purposes of cloud-config validation.
|
||||
func isCompatible(n, g reflect.Kind) bool {
|
||||
switch g {
|
||||
case reflect.String:
|
||||
return n == reflect.String || n == reflect.Int || n == reflect.Float64 || n == reflect.Bool
|
||||
case reflect.Struct:
|
||||
return n == reflect.Struct || n == reflect.Map
|
||||
case reflect.Float64:
|
||||
return n == reflect.Float64 || n == reflect.Int
|
||||
case reflect.Bool, reflect.Slice, reflect.Int:
|
||||
return n == g
|
||||
default:
|
||||
panic(fmt.Sprintf("isCompatible(): unhandled kind %s", g))
|
||||
}
|
||||
}
|
||||
|
||||
// checkValidity checks the value of every node in the provided config by
|
||||
// running config.AssertValid() on it.
|
||||
func checkValidity(cfg node, report *Report) {
|
||||
g := NewNode(config.CloudConfig{}, NewContext([]byte{}))
|
||||
checkNodeValidity(cfg, g, report)
|
||||
}
|
||||
|
||||
func checkNodeValidity(n, g node, r *Report) {
|
||||
if err := config.AssertValid(n.Value, g.field.Tag.Get("valid")); err != nil {
|
||||
r.Error(n.line, fmt.Sprintf("invalid value %v", n.Value.Interface()))
|
||||
}
|
||||
switch g.Kind() {
|
||||
case reflect.Struct:
|
||||
for _, cn := range n.children {
|
||||
if cg := g.Child(cn.name); cg.IsValid() {
|
||||
checkNodeValidity(cn, cg, r)
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
for _, cn := range n.children {
|
||||
var cg node
|
||||
c := g.Type().Elem()
|
||||
toNode(reflect.New(c).Elem().Interface(), context{}, &cg)
|
||||
checkNodeValidity(cn, cg, r)
|
||||
}
|
||||
case reflect.String, reflect.Int, reflect.Float64, reflect.Bool:
|
||||
default:
|
||||
panic(fmt.Sprintf("checkNodeValidity(): unhandled kind %s", g.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// checkWriteFiles checks to make sure that the target file can actually be
|
||||
// written. Note that this check is approximate (it only checks to see if the file
|
||||
// is under /usr).
|
||||
func checkWriteFiles(cfg node, report *Report) {
|
||||
for _, f := range cfg.Child("write_files").children {
|
||||
c := f.Child("path")
|
||||
if !c.IsValid() {
|
||||
continue
|
||||
}
|
||||
|
||||
d := path.Dir(c.String())
|
||||
switch {
|
||||
case strings.HasPrefix(d, "/usr"):
|
||||
report.Error(c.line, "file cannot be written to a read-only filesystem")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkWriteFilesUnderCoreos checks to see if the 'write_files' node is a
|
||||
// child of 'coreos' (it shouldn't be).
|
||||
func checkWriteFilesUnderCoreos(cfg node, report *Report) {
|
||||
c := cfg.Child("coreos").Child("write_files")
|
||||
if c.IsValid() {
|
||||
report.Info(c.line, "write_files doesn't belong under coreos")
|
||||
}
|
||||
}
|
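Every entry in Rules is a func(node, *Report) that inspects one slice of the cloud-config and appends findings. As a hedged sketch of that pattern, here is what an extra rule could look like if written inside this package; checkHostnameLength and its 63-character limit are purely illustrative and were never part of the removed file:

// checkHostnameLength warns when the configured hostname exceeds the common
// single-label DNS limit (hypothetical rule, shown only to illustrate the shape).
func checkHostnameLength(cfg node, report *Report) {
	h := cfg.Child("hostname")
	if !h.IsValid() {
		return
	}
	if len(h.String()) > 63 {
		report.Warning(h.line, "hostname is longer than 63 characters")
	}
}

Registering such a rule would simply mean appending it to the Rules slice above.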
399
vendor/github.com/coreos/coreos-cloudinit/config/validate/rules_test.go
generated
vendored
@ -1,399 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCheckDiscoveryUrl(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: https://discovery.etcd.io/00000000000000000000000000000000",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: http://custom.domain/mytoken",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: disco",
|
||||
entries: []Entry{{entryWarning, "discovery URL is not valid", 3}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkDiscoveryUrl(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckEncoding(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "write_files:\n - encoding: base64\n content: aGVsbG8K",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - content: !!binary aGVsbG8K",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: base64\n content: !!binary aGVsbG8K",
|
||||
entries: []Entry{{entryError, `content cannot be decoded as "base64"`, 3}},
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: base64\n content: !!binary YUdWc2JHOEsK",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: gzip\n content: !!binary H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: gzip+base64\n content: H4sIAOC3tVQAA8tIzcnJ5wIAIDA6NgYAAAA=",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - encoding: custom\n content: hello",
|
||||
entries: []Entry{{entryError, `content cannot be decoded as "custom"`, 3}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkEncoding(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckStructure(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
|
||||
// Test for unrecognized keys
|
||||
{
|
||||
config: "test:",
|
||||
entries: []Entry{{entryWarning, "unrecognized key \"test\"", 1}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n bad:",
|
||||
entries: []Entry{{entryWarning, "unrecognized key \"bad\"", 3}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: good",
|
||||
},
|
||||
|
||||
// Test for error on list of nodes
|
||||
{
|
||||
config: "coreos:\n units:\n - hello\n - goodbye",
|
||||
entries: []Entry{
|
||||
{entryWarning, "incorrect type for \"units[0]\" (want struct)", 3},
|
||||
{entryWarning, "incorrect type for \"units[1]\" (want struct)", 4},
|
||||
},
|
||||
},
|
||||
|
||||
// Test for incorrect types
|
||||
// Want boolean
|
||||
{
|
||||
config: "coreos:\n units:\n - enable: true",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - enable: 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"enable\" (want bool)", 3}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - enable: bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"enable\" (want bool)", 3}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - enable:\n bad:",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"enable\" (want bool)", 3}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - enable:\n - bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"enable\" (want bool)", 3}},
|
||||
},
|
||||
// Want string
|
||||
{
|
||||
config: "hostname: true",
|
||||
},
|
||||
{
|
||||
config: "hostname: 4",
|
||||
},
|
||||
{
|
||||
config: "hostname: host",
|
||||
},
|
||||
{
|
||||
config: "hostname:\n name:",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"hostname\" (want string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "hostname:\n - name",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"hostname\" (want string)", 1}},
|
||||
},
|
||||
// Want struct
|
||||
{
|
||||
config: "coreos: true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"coreos\" (want struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "coreos: 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"coreos\" (want struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "coreos: hello",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"coreos\" (want struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n etcd:\n discovery: fire in the disco",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n - hello",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"coreos\" (want struct)", 1}},
|
||||
},
|
||||
// Want []string
|
||||
{
|
||||
config: "ssh_authorized_keys: true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys\" (want []string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys: 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys\" (want []string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys: key",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys\" (want []string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys:\n key: value",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys\" (want []string)", 1}},
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys:\n - key",
|
||||
},
|
||||
{
|
||||
config: "ssh_authorized_keys:\n - key: value",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"ssh_authorized_keys[0]\" (want string)", 2}},
|
||||
},
|
||||
// Want []struct
|
||||
{
|
||||
config: "users:\n true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users\" (want []struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "users:\n 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users\" (want []struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "users:\n bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users\" (want []struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "users:\n bad:",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users\" (want []struct)", 1}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - name: good",
|
||||
},
|
||||
// Want struct within array
|
||||
{
|
||||
config: "users:\n - true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[0]\" (want struct)", 2}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - name: hi\n - true",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[1]\" (want struct)", 3}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - 4",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[0]\" (want struct)", 2}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[0]\" (want struct)", 2}},
|
||||
},
|
||||
{
|
||||
config: "users:\n - - bad",
|
||||
entries: []Entry{{entryWarning, "incorrect type for \"users[0]\" (want struct)", 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkStructure(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckValidity(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
// string
|
||||
{
|
||||
config: "hostname: test",
|
||||
},
|
||||
|
||||
// int
|
||||
{
|
||||
config: "coreos:\n fleet:\n verbosity: 2",
|
||||
},
|
||||
|
||||
// bool
|
||||
{
|
||||
config: "coreos:\n units:\n - enable: true",
|
||||
},
|
||||
|
||||
// slice
|
||||
{
|
||||
config: "coreos:\n units:\n - command: start\n - name: stop",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n units:\n - command: lol",
|
||||
entries: []Entry{{entryError, "invalid value lol", 3}},
|
||||
},
|
||||
|
||||
// struct
|
||||
{
|
||||
config: "coreos:\n update:\n reboot_strategy: off",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n update:\n reboot_strategy: always",
|
||||
entries: []Entry{{entryError, "invalid value always", 3}},
|
||||
},
|
||||
|
||||
// unknown
|
||||
{
|
||||
config: "unknown: hi",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkValidity(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckWriteFiles(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "write_files:\n - path: /valid",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - path: /tmp/usr/valid",
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - path: /usr/invalid",
|
||||
entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
|
||||
},
|
||||
{
|
||||
config: "write-files:\n - path: /tmp/../usr/invalid",
|
||||
entries: []Entry{{entryError, "file cannot be written to a read-only filesystem", 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkWriteFiles(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckWriteFilesUnderCoreos(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "write_files:\n - path: /hi",
|
||||
},
|
||||
{
|
||||
config: "coreos:\n write_files:\n - path: /hi",
|
||||
entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
|
||||
},
|
||||
{
|
||||
config: "coreos:\n write-files:\n - path: /hyphen",
|
||||
entries: []Entry{{entryInfo, "write_files doesn't belong under coreos", 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := Report{}
|
||||
n, err := parseCloudConfig([]byte(tt.config), &r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
checkWriteFilesUnderCoreos(n, &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%d, %q): want %#v, got %#v", i, tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
162
vendor/github.com/coreos/coreos-cloudinit/config/validate/validate.go
generated
vendored
@ -1,162 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/config"
|
||||
|
||||
"github.com/coreos/yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
yamlLineError = regexp.MustCompile(`^YAML error: line (?P<line>[[:digit:]]+): (?P<msg>.*)$`)
|
||||
yamlError = regexp.MustCompile(`^YAML error: (?P<msg>.*)$`)
|
||||
)
|
||||
|
||||
// Validate runs a series of validation tests against the given userdata and
|
||||
// returns a report detailing all of the issues. Presently, only cloud-configs
|
||||
// can be validated.
|
||||
func Validate(userdataBytes []byte) (Report, error) {
|
||||
switch {
|
||||
case len(userdataBytes) == 0:
|
||||
return Report{}, nil
|
||||
case config.IsScript(string(userdataBytes)):
|
||||
return Report{}, nil
|
||||
case config.IsCloudConfig(string(userdataBytes)):
|
||||
return validateCloudConfig(userdataBytes, Rules)
|
||||
default:
|
||||
return Report{entries: []Entry{
|
||||
Entry{kind: entryError, message: `must be "#cloud-config" or begin with "#!"`, line: 1},
|
||||
}}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// validateCloudConfig runs all of the validation rules in Rules and returns
|
||||
// the resulting report and any errors encountered.
|
||||
func validateCloudConfig(config []byte, rules []rule) (report Report, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = fmt.Errorf("%v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
c, err := parseCloudConfig(config, &report)
|
||||
if err != nil {
|
||||
return report, err
|
||||
}
|
||||
|
||||
for _, r := range rules {
|
||||
r(c, &report)
|
||||
}
|
||||
return report, nil
|
||||
}
|
||||
|
||||
// parseCloudConfig parses the provided config into a node structure and logs
|
||||
// any parsing issues into the provided report. Unrecoverable errors are
|
||||
// returned as an error.
|
||||
func parseCloudConfig(cfg []byte, report *Report) (node, error) {
|
||||
yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
|
||||
return nameIn
|
||||
}
|
||||
// unmarshal the config into an implicitly-typed form. The yaml library
|
||||
// will implicitly convert types into their normalized form
|
||||
// (e.g. 0744 -> 484, off -> false).
|
||||
var weak map[interface{}]interface{}
|
||||
if err := yaml.Unmarshal(cfg, &weak); err != nil {
|
||||
matches := yamlLineError.FindStringSubmatch(err.Error())
|
||||
if len(matches) == 3 {
|
||||
line, err := strconv.Atoi(matches[1])
|
||||
if err != nil {
|
||||
return node{}, err
|
||||
}
|
||||
msg := matches[2]
|
||||
report.Error(line, msg)
|
||||
return node{}, nil
|
||||
}
|
||||
|
||||
matches = yamlError.FindStringSubmatch(err.Error())
|
||||
if len(matches) == 2 {
|
||||
report.Error(1, matches[1])
|
||||
return node{}, nil
|
||||
}
|
||||
|
||||
return node{}, errors.New("couldn't parse yaml error")
|
||||
}
|
||||
w := NewNode(weak, NewContext(cfg))
|
||||
w = normalizeNodeNames(w, report)
|
||||
|
||||
// unmarshal the config into the explicitly-typed form.
|
||||
yaml.UnmarshalMappingKeyTransform = func(nameIn string) (nameOut string) {
|
||||
return strings.Replace(nameIn, "-", "_", -1)
|
||||
}
|
||||
var strong config.CloudConfig
|
||||
if err := yaml.Unmarshal([]byte(cfg), &strong); err != nil {
|
||||
return node{}, err
|
||||
}
|
||||
s := NewNode(strong, NewContext(cfg))
|
||||
|
||||
// coerce the weak and strong nodes: strong nodes replace weak nodes
|
||||
// if they are compatible types (this happens when the yaml library
|
||||
// converts the input).
|
||||
// (e.g. weak 484 is replaced by strong 0744, weak 4 is not replaced by
|
||||
// strong false)
|
||||
return coerceNodes(w, s), nil
|
||||
}
|
||||
|
||||
// coerceNodes recursively evaluates two nodes, returning a new node containing
|
||||
// either the weak or strong node's value and its recursively processed
|
||||
// children. The strong node's value is used if the two nodes are leaves, are
|
||||
// both valid, and are compatible types (defined by isCompatible()). The weak
|
||||
// node is returned in all other cases. coerceNodes is used to counteract the
|
||||
// effects of yaml's automatic type conversion. The weak node is the one
|
||||
// resulting from unmarshalling into an empty interface{} (the type is
|
||||
// inferred). The strong node is the one resulting from unmarshalling into a
|
||||
// struct. If the two nodes are of compatible types, the yaml library correctly
|
||||
// parsed the value into the strongly typed unmarshalling. In this case, we
|
||||
// prefer the strong node because it's actually the type we are expecting.
|
||||
func coerceNodes(w, s node) node {
|
||||
n := w
|
||||
n.children = nil
|
||||
if len(w.children) == 0 && len(s.children) == 0 &&
|
||||
w.IsValid() && s.IsValid() &&
|
||||
isCompatible(w.Kind(), s.Kind()) {
|
||||
n.Value = s.Value
|
||||
}
|
||||
|
||||
for _, cw := range w.children {
|
||||
n.children = append(n.children, coerceNodes(cw, s.Child(cw.name)))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// normalizeNodeNames replaces all occurrences of '-' with '_' within key names
|
||||
// and makes a note of each replacement in the report.
|
||||
func normalizeNodeNames(node node, report *Report) node {
|
||||
if strings.Contains(node.name, "-") {
|
||||
// TODO(crawford): Enable this message once the new validator hits stable.
|
||||
//report.Info(node.line, fmt.Sprintf("%q uses '-' instead of '_'", node.name))
|
||||
node.name = strings.Replace(node.name, "-", "_", -1)
|
||||
}
|
||||
for i := range node.children {
|
||||
node.children[i] = normalizeNodeNames(node.children[i], report)
|
||||
}
|
||||
return node
|
||||
}
|
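Validate is the exported entry point that ties parseCloudConfig and Rules together. A small sketch of calling it on a user-data file, assuming the vendored import path still resolves; the command-line handling is illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/coreos/coreos-cloudinit/config/validate"
)

func main() {
	// Read the user-data file named on the command line and print every finding.
	userdata, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	report, err := validate.Validate(userdata)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, entry := range report.Entries() {
		fmt.Println(entry)
	}
}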
167
vendor/github.com/coreos/coreos-cloudinit/config/validate/validate_test.go
generated
vendored
@ -1,167 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package validate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseCloudConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
entries []Entry
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: " ",
|
||||
entries: []Entry{{entryError, "found character that cannot start any token", 1}},
|
||||
},
|
||||
{
|
||||
config: "a:\na",
|
||||
entries: []Entry{{entryError, "could not find expected ':'", 2}},
|
||||
},
|
||||
{
|
||||
config: "#hello\na:\na",
|
||||
entries: []Entry{{entryError, "could not find expected ':'", 3}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
r := Report{}
|
||||
parseCloudConfig([]byte(tt.config), &r)
|
||||
|
||||
if e := r.Entries(); !reflect.DeepEqual(tt.entries, e) {
|
||||
t.Errorf("bad report (%s): want %#v, got %#v", tt.config, tt.entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateCloudConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
rules []rule
|
||||
|
||||
report Report
|
||||
err error
|
||||
}{
|
||||
{
|
||||
rules: []rule{func(_ node, _ *Report) { panic("something happened") }},
|
||||
err: errors.New("something happened"),
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - permissions: 0744",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - permissions: '0744'",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - permissions: 744",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "write_files:\n - permissions: '744'",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "coreos:\n update:\n reboot-strategy: off",
|
||||
rules: Rules,
|
||||
},
|
||||
{
|
||||
config: "coreos:\n update:\n reboot-strategy: false",
|
||||
rules: Rules,
|
||||
report: Report{entries: []Entry{{entryError, "invalid value false", 3}}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
r, err := validateCloudConfig([]byte(tt.config), tt.rules)
|
||||
if !reflect.DeepEqual(tt.err, err) {
|
||||
t.Errorf("bad error (%s): want %v, got %v", tt.config, tt.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.report, r) {
|
||||
t.Errorf("bad report (%s): want %+v, got %+v", tt.config, tt.report, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
config string
|
||||
|
||||
report Report
|
||||
}{
|
||||
{},
|
||||
{
|
||||
config: "#!/bin/bash\necho hey",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r, err := Validate([]byte(tt.config))
|
||||
if err != nil {
|
||||
t.Errorf("bad error (case #%d): want %v, got %v", i, nil, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.report, r) {
|
||||
t.Errorf("bad report (case #%d): want %+v, got %+v", i, tt.report, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkValidate(b *testing.B) {
|
||||
config := `#cloud-config
|
||||
hostname: test
|
||||
|
||||
coreos:
|
||||
etcd:
|
||||
name: node001
|
||||
discovery: https://discovery.etcd.io/disco
|
||||
addr: $public_ipv4:4001
|
||||
peer-addr: $private_ipv4:7001
|
||||
fleet:
|
||||
verbosity: 2
|
||||
metadata: "hi"
|
||||
update:
|
||||
reboot-strategy: off
|
||||
units:
|
||||
- name: hi.service
|
||||
command: start
|
||||
enable: true
|
||||
- name: bye.service
|
||||
command: stop
|
||||
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0g+ZTxC7weoIJLUafOgrm+h...
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0g+ZTxC7weoIJLUafOgrm+h...
|
||||
|
||||
users:
|
||||
- name: me
|
||||
|
||||
write_files:
|
||||
- path: /etc/yes
|
||||
content: "Hi"
|
||||
|
||||
manage_etc_hosts: localhost`
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := Validate([]byte(config)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
195
vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma/server_context.go
generated
vendored
@ -1,195 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cloudsigma
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/datasource"
|
||||
|
||||
"github.com/cloudsigma/cepgo"
|
||||
)
|
||||
|
||||
const (
|
||||
userDataFieldName = "cloudinit-user-data"
|
||||
)
|
||||
|
||||
type serverContextService struct {
|
||||
client interface {
|
||||
All() (interface{}, error)
|
||||
Key(string) (interface{}, error)
|
||||
Meta() (map[string]string, error)
|
||||
FetchRaw(string) ([]byte, error)
|
||||
}
|
||||
}
|
||||
|
||||
func NewServerContextService() *serverContextService {
|
||||
return &serverContextService{
|
||||
client: cepgo.NewCepgo(),
|
||||
}
|
||||
}
|
||||
|
||||
func (_ *serverContextService) IsAvailable() bool {
|
||||
productNameFile, err := os.Open("/sys/class/dmi/id/product_name")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
productName := make([]byte, 10)
|
||||
_, err = productNameFile.Read(productName)
|
||||
|
||||
return err == nil && string(productName) == "CloudSigma" && hasDHCPLeases()
|
||||
}
|
||||
|
||||
func (_ *serverContextService) AvailabilityChanges() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (_ *serverContextService) ConfigRoot() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (_ *serverContextService) Type() string {
|
||||
return "server-context"
|
||||
}
|
||||
|
||||
func (scs *serverContextService) FetchMetadata() (metadata datasource.Metadata, err error) {
|
||||
var (
|
||||
inputMetadata struct {
|
||||
Name string `json:"name"`
|
||||
UUID string `json:"uuid"`
|
||||
Meta map[string]string `json:"meta"`
|
||||
Nics []struct {
|
||||
Mac string `json:"mac"`
|
||||
IPv4Conf struct {
|
||||
InterfaceType string `json:"interface_type"`
|
||||
IP struct {
|
||||
UUID string `json:"uuid"`
|
||||
} `json:"ip"`
|
||||
} `json:"ip_v4_conf"`
|
||||
VLAN struct {
|
||||
UUID string `json:"uuid"`
|
||||
} `json:"vlan"`
|
||||
} `json:"nics"`
|
||||
}
|
||||
rawMetadata []byte
|
||||
)
|
||||
|
||||
if rawMetadata, err = scs.client.FetchRaw(""); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(rawMetadata, &inputMetadata); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if inputMetadata.Name != "" {
|
||||
metadata.Hostname = inputMetadata.Name
|
||||
} else {
|
||||
metadata.Hostname = inputMetadata.UUID
|
||||
}
|
||||
|
||||
metadata.SSHPublicKeys = map[string]string{}
|
||||
if key, ok := inputMetadata.Meta["ssh_public_key"]; ok {
|
||||
splitted := strings.Split(key, " ")
|
||||
metadata.SSHPublicKeys[splitted[len(splitted)-1]] = key
|
||||
}
|
||||
|
||||
for _, nic := range inputMetadata.Nics {
|
||||
if nic.IPv4Conf.IP.UUID != "" {
|
||||
metadata.PublicIPv4 = net.ParseIP(nic.IPv4Conf.IP.UUID)
|
||||
}
|
||||
if nic.VLAN.UUID != "" {
|
||||
if localIP, err := scs.findLocalIP(nic.Mac); err == nil {
|
||||
metadata.PrivateIPv4 = localIP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (scs *serverContextService) FetchUserdata() ([]byte, error) {
|
||||
metadata, err := scs.client.Meta()
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
userData, ok := metadata[userDataFieldName]
|
||||
if ok && isBase64Encoded(userDataFieldName, metadata) {
|
||||
if decodedUserData, err := base64.StdEncoding.DecodeString(userData); err == nil {
|
||||
return decodedUserData, nil
|
||||
} else {
|
||||
return []byte{}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return []byte(userData), nil
|
||||
}
|
||||
|
||||
func (scs *serverContextService) findLocalIP(mac string) (net.IP, error) {
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ifaceMac, err := net.ParseMAC(mac)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, iface := range ifaces {
|
||||
if !bytes.Equal(iface.HardwareAddr, ifaceMac) {
|
||||
continue
|
||||
}
|
||||
addrs, err := iface.Addrs()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
switch ip := addr.(type) {
|
||||
case *net.IPNet:
|
||||
if ip.IP.To4() != nil {
|
||||
return ip.IP.To4(), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, errors.New("Local IP not found")
|
||||
}
|
||||
|
||||
func isBase64Encoded(field string, userdata map[string]string) bool {
|
||||
base64Fields, ok := userdata["base64_fields"]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, base64Field := range strings.Split(base64Fields, ",") {
|
||||
if field == base64Field {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func hasDHCPLeases() bool {
|
||||
files, err := ioutil.ReadDir("/run/systemd/netif/leases/")
|
||||
return err == nil && len(files) > 0
|
||||
}
|
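The serverContextService above exposes the usual datasource surface (IsAvailable, FetchMetadata, FetchUserdata). A rough usage sketch; it only does real work on a CloudSigma guest, since IsAvailable probes /sys/class/dmi/id/product_name and the systemd DHCP lease directory:

package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/datasource/metadata/cloudsigma"
)

func main() {
	scs := cloudsigma.NewServerContextService()
	if !scs.IsAvailable() {
		fmt.Println("not running on CloudSigma")
		return
	}
	metadata, err := scs.FetchMetadata()
	if err != nil {
		fmt.Println("metadata error:", err)
		return
	}
	fmt.Println(metadata.Hostname, metadata.PublicIPv4, metadata.PrivateIPv4)

	if userdata, err := scs.FetchUserdata(); err == nil {
		fmt.Printf("%s\n", userdata)
	}
}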
@ -1,179 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cloudsigma
|
||||
|
||||
import (
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type fakeCepgoClient struct {
|
||||
raw []byte
|
||||
meta map[string]string
|
||||
keys map[string]interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeCepgoClient) All() (interface{}, error) {
|
||||
return f.keys, f.err
|
||||
}
|
||||
|
||||
func (f *fakeCepgoClient) Key(key string) (interface{}, error) {
|
||||
return f.keys[key], f.err
|
||||
}
|
||||
|
||||
func (f *fakeCepgoClient) Meta() (map[string]string, error) {
|
||||
return f.meta, f.err
|
||||
}
|
||||
|
||||
func (f *fakeCepgoClient) FetchRaw(key string) ([]byte, error) {
|
||||
return f.raw, f.err
|
||||
}
|
||||
|
||||
func TestServerContextFetchMetadata(t *testing.T) {
|
||||
client := new(fakeCepgoClient)
|
||||
scs := NewServerContextService()
|
||||
scs.client = client
|
||||
client.raw = []byte(`{
|
||||
"context": true,
|
||||
"cpu": 4000,
|
||||
"cpu_model": null,
|
||||
"cpus_instead_of_cores": false,
|
||||
"enable_numa": false,
|
||||
"grantees": [],
|
||||
"hv_relaxed": false,
|
||||
"hv_tsc": false,
|
||||
"jobs": [],
|
||||
"mem": 4294967296,
|
||||
"meta": {
|
||||
"base64_fields": "cloudinit-user-data",
|
||||
"cloudinit-user-data": "I2Nsb3VkLWNvbmZpZwoKaG9zdG5hbWU6IGNvcmVvczE=",
|
||||
"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"
|
||||
},
|
||||
"name": "coreos",
|
||||
"nics": [
|
||||
{
|
||||
"boot_order": null,
|
||||
"ip_v4_conf": {
|
||||
"conf": "dhcp",
|
||||
"ip": {
|
||||
"gateway": "31.171.244.1",
|
||||
"meta": {},
|
||||
"nameservers": [
|
||||
"178.22.66.167",
|
||||
"178.22.71.56",
|
||||
"8.8.8.8"
|
||||
],
|
||||
"netmask": 22,
|
||||
"tags": [],
|
||||
"uuid": "31.171.251.74"
|
||||
}
|
||||
},
|
||||
"ip_v6_conf": null,
|
||||
"mac": "22:3d:09:6b:90:f3",
|
||||
"model": "virtio",
|
||||
"vlan": null
|
||||
},
|
||||
{
|
||||
"boot_order": null,
|
||||
"ip_v4_conf": null,
|
||||
"ip_v6_conf": null,
|
||||
"mac": "22:ae:4a:fb:8f:31",
|
||||
"model": "virtio",
|
||||
"vlan": {
|
||||
"meta": {
|
||||
"description": "",
|
||||
"name": "CoreOS"
|
||||
},
|
||||
"tags": [],
|
||||
"uuid": "5dec030e-25b8-4621-a5a4-a3302c9d9619"
|
||||
}
|
||||
}
|
||||
],
|
||||
"smp": 2,
|
||||
"status": "running",
|
||||
"uuid": "20a0059b-041e-4d0c-bcc6-9b2852de48b3"
|
||||
}`)
|
||||
|
||||
metadata, err := scs.FetchMetadata()
|
||||
if err != nil {
|
||||
t.Error(err.Error())
|
||||
}
|
||||
|
||||
if metadata.Hostname != "coreos" {
|
||||
t.Errorf("Hostname is not 'coreos' but %s instead", metadata.Hostname)
|
||||
}
|
||||
|
||||
if metadata.SSHPublicKeys["john@doe"] != "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe" {
|
||||
t.Error("Public SSH Keys are not being read properly")
|
||||
}
|
||||
|
||||
if !metadata.PublicIPv4.Equal(net.ParseIP("31.171.251.74")) {
|
||||
t.Errorf("Public IP is not 31.171.251.74 but %s instead", metadata.PublicIPv4)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerContextFetchUserdata(t *testing.T) {
|
||||
client := new(fakeCepgoClient)
|
||||
scs := NewServerContextService()
|
||||
scs.client = client
|
||||
userdataSets := []struct {
|
||||
in map[string]string
|
||||
err bool
|
||||
out []byte
|
||||
}{
|
||||
{map[string]string{
|
||||
"base64_fields": "cloudinit-user-data",
|
||||
"cloudinit-user-data": "aG9zdG5hbWU6IGNvcmVvc190ZXN0",
|
||||
}, false, []byte("hostname: coreos_test")},
|
||||
{map[string]string{
|
||||
"cloudinit-user-data": "#cloud-config\\nhostname: coreos1",
|
||||
}, false, []byte("#cloud-config\\nhostname: coreos1")},
|
||||
{map[string]string{}, false, []byte{}},
|
||||
}
|
||||
|
||||
for i, set := range userdataSets {
|
||||
client.meta = set.in
|
||||
got, err := scs.FetchUserdata()
|
||||
if (err != nil) != set.err {
|
||||
t.Errorf("case %d: bad error state (got %t, want %t)", i, err != nil, set.err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(got, set.out) {
|
||||
t.Errorf("case %d: got %s, want %s", i, got, set.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestServerContextDecodingBase64UserData(t *testing.T) {
|
||||
base64Sets := []struct {
|
||||
in string
|
||||
out bool
|
||||
}{
|
||||
{"cloudinit-user-data,foo,bar", true},
|
||||
{"bar,cloudinit-user-data,foo,bar", true},
|
||||
{"cloudinit-user-data", true},
|
||||
{"", false},
|
||||
{"foo", false},
|
||||
}
|
||||
|
||||
for _, set := range base64Sets {
|
||||
userdata := map[string]string{"base64_fields": set.in}
|
||||
if isBase64Encoded("cloudinit-user-data", userdata) != set.out {
|
||||
t.Errorf("isBase64Encoded(cloudinit-user-data, %s) should be %t", userdata, set.out)
|
||||
}
|
||||
}
|
||||
}
|
41
vendor/github.com/coreos/coreos-cloudinit/datasource/metadata/test/test.go
generated
vendored
@ -1,41 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/pkg"
|
||||
)
|
||||
|
||||
type HttpClient struct {
|
||||
Resources map[string]string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (t *HttpClient) GetRetry(url string) ([]byte, error) {
|
||||
if t.Err != nil {
|
||||
return nil, t.Err
|
||||
}
|
||||
if val, ok := t.Resources[url]; ok {
|
||||
return []byte(val), nil
|
||||
} else {
|
||||
return nil, pkg.ErrNotFound{fmt.Errorf("not found: %q", url)}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *HttpClient) Get(url string) ([]byte, error) {
|
||||
return t.GetRetry(url)
|
||||
}
|
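HttpClient is an in-memory stand-in for the metadata HTTP client: known URLs return their mapped bodies, everything else comes back as pkg.ErrNotFound. A short test-style sketch of how it was exercised; the test name and URLs are hypothetical:

package test

import "testing"

func TestHttpClientSketch(t *testing.T) {
	client := &HttpClient{
		Resources: map[string]string{"http://example.test/meta": "hello"},
	}
	// A mapped URL returns its body.
	if body, err := client.GetRetry("http://example.test/meta"); err != nil || string(body) != "hello" {
		t.Fatalf("unexpected result: %q, %v", body, err)
	}
	// An unmapped URL surfaces a not-found error.
	if _, err := client.GetRetry("http://example.test/missing"); err == nil {
		t.Fatal("expected a not-found error")
	}
}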
57
vendor/github.com/coreos/coreos-cloudinit/datasource/test/filesystem.go
generated
vendored
@ -1,57 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
type MockFilesystem map[string]File
|
||||
|
||||
type File struct {
|
||||
Path string
|
||||
Contents string
|
||||
Directory bool
|
||||
}
|
||||
|
||||
func (m MockFilesystem) ReadFile(filename string) ([]byte, error) {
|
||||
if f, ok := m[path.Clean(filename)]; ok {
|
||||
if f.Directory {
|
||||
return nil, fmt.Errorf("read %s: is a directory", filename)
|
||||
}
|
||||
return []byte(f.Contents), nil
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
func NewMockFilesystem(files ...File) MockFilesystem {
|
||||
fs := MockFilesystem{}
|
||||
for _, file := range files {
|
||||
fs[file.Path] = file
|
||||
|
||||
// Create the directories leading up to the file
|
||||
p := path.Dir(file.Path)
|
||||
for p != "/" && p != "." {
|
||||
if f, ok := fs[p]; ok && !f.Directory {
|
||||
panic(fmt.Sprintf("%q already exists and is not a directory (%#v)", p, f))
|
||||
}
|
||||
fs[p] = File{Path: p, Directory: true}
|
||||
p = path.Dir(p)
|
||||
}
|
||||
}
|
||||
return fs
|
||||
}
|
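NewMockFilesystem fabricates every parent directory for the files it is given, so reads of those directories fail the same way a real filesystem would. A brief sketch under the assumption that the package is imported from github.com/coreos/coreos-cloudinit/datasource/test:

package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/datasource/test"
)

func main() {
	fs := test.NewMockFilesystem(
		test.File{Path: "/var/lib/waagent/CustomData", Contents: "#cloud-config"},
	)

	data, err := fs.ReadFile("/var/lib/waagent/CustomData")
	fmt.Println(string(data), err) // "#cloud-config" <nil>

	_, err = fs.ReadFile("/var/lib/waagent")
	fmt.Println(err) // read /var/lib/waagent: is a directory
}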
115
vendor/github.com/coreos/coreos-cloudinit/datasource/test/filesystem_test.go
generated
vendored
@ -1,115 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReadFile(t *testing.T) {
|
||||
tests := []struct {
|
||||
filesystem MockFilesystem
|
||||
|
||||
filename string
|
||||
contents string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
filename: "dne",
|
||||
err: os.ErrNotExist,
|
||||
},
|
||||
{
|
||||
filesystem: MockFilesystem{
|
||||
"exists": File{Contents: "hi"},
|
||||
},
|
||||
filename: "exists",
|
||||
contents: "hi",
|
||||
},
|
||||
{
|
||||
filesystem: MockFilesystem{
|
||||
"dir": File{Directory: true},
|
||||
},
|
||||
filename: "dir",
|
||||
err: errors.New("read dir: is a directory"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
contents, err := tt.filesystem.ReadFile(tt.filename)
|
||||
if tt.contents != string(contents) {
|
||||
t.Errorf("bad contents (test %d): want %q, got %q", i, tt.contents, string(contents))
|
||||
}
|
||||
if !reflect.DeepEqual(tt.err, err) {
|
||||
t.Errorf("bad error (test %d): want %v, got %v", i, tt.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewMockFilesystem(t *testing.T) {
|
||||
tests := []struct {
|
||||
files []File
|
||||
|
||||
filesystem MockFilesystem
|
||||
}{
|
||||
{
|
||||
filesystem: MockFilesystem{},
|
||||
},
|
||||
{
|
||||
files: []File{File{Path: "file"}},
|
||||
filesystem: MockFilesystem{
|
||||
"file": File{Path: "file"},
|
||||
},
|
||||
},
|
||||
{
|
||||
files: []File{File{Path: "/file"}},
|
||||
filesystem: MockFilesystem{
|
||||
"/file": File{Path: "/file"},
|
||||
},
|
||||
},
|
||||
{
|
||||
files: []File{File{Path: "/dir/file"}},
|
||||
filesystem: MockFilesystem{
|
||||
"/dir": File{Path: "/dir", Directory: true},
|
||||
"/dir/file": File{Path: "/dir/file"},
|
||||
},
|
||||
},
|
||||
{
|
||||
files: []File{File{Path: "/dir/dir/file"}},
|
||||
filesystem: MockFilesystem{
|
||||
"/dir": File{Path: "/dir", Directory: true},
|
||||
"/dir/dir": File{Path: "/dir/dir", Directory: true},
|
||||
"/dir/dir/file": File{Path: "/dir/dir/file"},
|
||||
},
|
||||
},
|
||||
{
|
||||
files: []File{File{Path: "/dir/dir/dir", Directory: true}},
|
||||
filesystem: MockFilesystem{
|
||||
"/dir": File{Path: "/dir", Directory: true},
|
||||
"/dir/dir": File{Path: "/dir/dir", Directory: true},
|
||||
"/dir/dir/dir": File{Path: "/dir/dir/dir", Directory: true},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
filesystem := NewMockFilesystem(tt.files...)
|
||||
if !reflect.DeepEqual(tt.filesystem, filesystem) {
|
||||
t.Errorf("bad filesystem (test %d): want %#v, got %#v", i, tt.filesystem, filesystem)
|
||||
}
|
||||
}
|
||||
}
|
117
vendor/github.com/coreos/coreos-cloudinit/datasource/waagent/waagent.go
generated
vendored
@ -1,117 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package waagent
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/datasource"
|
||||
)
|
||||
|
||||
type waagent struct {
|
||||
root string
|
||||
readFile func(filename string) ([]byte, error)
|
||||
}
|
||||
|
||||
func NewDatasource(root string) *waagent {
|
||||
return &waagent{root, ioutil.ReadFile}
|
||||
}
|
||||
|
||||
func (a *waagent) IsAvailable() bool {
|
||||
_, err := os.Stat(path.Join(a.root, "provisioned"))
|
||||
return !os.IsNotExist(err)
|
||||
}
|
||||
|
||||
func (a *waagent) AvailabilityChanges() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (a *waagent) ConfigRoot() string {
|
||||
return a.root
|
||||
}
|
||||
|
||||
func (a *waagent) FetchMetadata() (metadata datasource.Metadata, err error) {
|
||||
var metadataBytes []byte
|
||||
if metadataBytes, err = a.tryReadFile(path.Join(a.root, "SharedConfig.xml")); err != nil {
|
||||
return
|
||||
}
|
||||
if len(metadataBytes) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
type Instance struct {
|
||||
Id string `xml:"id,attr"`
|
||||
Address string `xml:"address,attr"`
|
||||
InputEndpoints struct {
|
||||
Endpoints []struct {
|
||||
LoadBalancedPublicAddress string `xml:"loadBalancedPublicAddress,attr"`
|
||||
} `xml:"Endpoint"`
|
||||
}
|
||||
}
|
||||
|
||||
type SharedConfig struct {
|
||||
Incarnation struct {
|
||||
Instance string `xml:"instance,attr"`
|
||||
}
|
||||
Instances struct {
|
||||
Instances []Instance `xml:"Instance"`
|
||||
}
|
||||
}
|
||||
|
||||
var m SharedConfig
|
||||
if err = xml.Unmarshal(metadataBytes, &m); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var instance Instance
|
||||
for _, i := range m.Instances.Instances {
|
||||
if i.Id == m.Incarnation.Instance {
|
||||
instance = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
metadata.PrivateIPv4 = net.ParseIP(instance.Address)
|
||||
for _, e := range instance.InputEndpoints.Endpoints {
|
||||
host, _, err := net.SplitHostPort(e.LoadBalancedPublicAddress)
|
||||
if err == nil {
|
||||
metadata.PublicIPv4 = net.ParseIP(host)
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (a *waagent) FetchUserdata() ([]byte, error) {
|
||||
return a.tryReadFile(path.Join(a.root, "CustomData"))
|
||||
}
|
||||
|
||||
func (a *waagent) Type() string {
|
||||
return "waagent"
|
||||
}
|
||||
|
||||
func (a *waagent) tryReadFile(filename string) ([]byte, error) {
|
||||
fmt.Printf("Attempting to read from %q\n", filename)
|
||||
data, err := a.readFile(filename)
|
||||
if os.IsNotExist(err) {
|
||||
err = nil
|
||||
}
|
||||
return data, err
|
||||
}
|
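The waagent datasource reads whatever the Azure Linux agent has already dropped on disk. A minimal sketch of its use; /var/lib/waagent matches the tests below but is an assumption about the host layout:

package main

import (
	"fmt"

	"github.com/coreos/coreos-cloudinit/datasource/waagent"
)

func main() {
	ds := waagent.NewDatasource("/var/lib/waagent")
	if !ds.IsAvailable() {
		fmt.Println("waagent has not provisioned this host")
		return
	}
	metadata, err := ds.FetchMetadata()
	if err != nil {
		fmt.Println("metadata error:", err)
		return
	}
	fmt.Println(metadata.PrivateIPv4, metadata.PublicIPv4)

	userdata, _ := ds.FetchUserdata()
	fmt.Printf("%d bytes of CustomData\n", len(userdata))
}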
166
vendor/github.com/coreos/coreos-cloudinit/datasource/waagent/waagent_test.go
generated
vendored
@ -1,166 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package waagent
|
||||
|
||||
import (
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/coreos-cloudinit/datasource"
|
||||
"github.com/coreos/coreos-cloudinit/datasource/test"
|
||||
)
|
||||
|
||||
func TestFetchMetadata(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
files test.MockFilesystem
|
||||
metadata datasource.Metadata
|
||||
}{
|
||||
{
|
||||
root: "/",
|
||||
files: test.NewMockFilesystem(),
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
files: test.NewMockFilesystem(test.File{Path: "/SharedConfig.xml", Contents: ""}),
|
||||
},
|
||||
{
|
||||
root: "/var/lib/waagent",
|
||||
files: test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/SharedConfig.xml", Contents: ""}),
|
||||
},
|
||||
{
|
||||
root: "/var/lib/waagent",
|
||||
files: test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/SharedConfig.xml", Contents: `<?xml version="1.0" encoding="utf-8"?>
|
||||
<SharedConfig version="1.0.0.0" goalStateIncarnation="1">
|
||||
<Deployment name="c8f9e4c9c18948e1bebf57c5685da756" guid="{1d10394f-c741-4a1a-a6bb-278f213c5a5e}" incarnation="0" isNonCancellableTopologyChangeEnabled="false">
|
||||
<Service name="core-test-1" guid="{00000000-0000-0000-0000-000000000000}" />
|
||||
<ServiceInstance name="c8f9e4c9c18948e1bebf57c5685da756.0" guid="{1e202e9a-8ffe-4915-b6ef-4118c9628fda}" />
|
||||
</Deployment>
|
||||
<Incarnation number="1" instance="core-test-1" guid="{8767eb4b-b445-4783-b1f5-6c0beaf41ea0}" />
|
||||
<Role guid="{53ecc81e-257f-fbc9-a53a-8cf1a0a122b4}" name="core-test-1" settleTimeSeconds="0" />
|
||||
<LoadBalancerSettings timeoutSeconds="0" waitLoadBalancerProbeCount="8">
|
||||
<Probes>
|
||||
<Probe name="D41D8CD98F00B204E9800998ECF8427E" />
|
||||
<Probe name="C9DEC1518E1158748FA4B6081A8266DD" />
|
||||
</Probes>
|
||||
</LoadBalancerSettings>
|
||||
<OutputEndpoints>
|
||||
<Endpoint name="core-test-1:openInternalEndpoint" type="SFS">
|
||||
<Target instance="core-test-1" endpoint="openInternalEndpoint" />
|
||||
</Endpoint>
|
||||
</OutputEndpoints>
|
||||
<Instances>
|
||||
<Instance id="core-test-1" address="100.73.202.64">
|
||||
<FaultDomains randomId="0" updateId="0" updateCount="0" />
|
||||
<InputEndpoints>
|
||||
<Endpoint name="openInternalEndpoint" address="100.73.202.64" protocol="any" isPublic="false" enableDirectServerReturn="false" isDirectAddress="false" disableStealthMode="false">
|
||||
<LocalPorts>
|
||||
<LocalPortSelfManaged />
|
||||
</LocalPorts>
|
||||
</Endpoint>
|
||||
<Endpoint name="ssh" address="100.73.202.64:22" protocol="tcp" hostName="core-test-1ContractContract" isPublic="true" loadBalancedPublicAddress="191.239.39.77:22" enableDirectServerReturn="false" isDirectAddress="false" disableStealthMode="false">
|
||||
<LocalPorts>
|
||||
<LocalPortRange from="22" to="22" />
|
||||
</LocalPorts>
|
||||
</Endpoint>
|
||||
</InputEndpoints>
|
||||
</Instance>
|
||||
</Instances>
|
||||
</SharedConfig>`}),
|
||||
metadata: datasource.Metadata{
|
||||
PrivateIPv4: net.ParseIP("100.73.202.64"),
|
||||
PublicIPv4: net.ParseIP("191.239.39.77"),
|
||||
},
|
||||
},
|
||||
} {
|
||||
a := waagent{tt.root, tt.files.ReadFile}
|
||||
metadata, err := a.FetchMetadata()
|
||||
if err != nil {
|
||||
t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.metadata, metadata) {
|
||||
t.Fatalf("bad metadata for %+v: want %#v, got %#v", tt, tt.metadata, metadata)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchUserdata(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
files test.MockFilesystem
|
||||
}{
|
||||
{
|
||||
"/",
|
||||
test.NewMockFilesystem(),
|
||||
},
|
||||
{
|
||||
"/",
|
||||
test.NewMockFilesystem(test.File{Path: "/CustomData", Contents: ""}),
|
||||
},
|
||||
{
|
||||
"/var/lib/waagent/",
|
||||
test.NewMockFilesystem(test.File{Path: "/var/lib/waagent/CustomData", Contents: ""}),
|
||||
},
|
||||
} {
|
||||
a := waagent{tt.root, tt.files.ReadFile}
|
||||
_, err := a.FetchUserdata()
|
||||
if err != nil {
|
||||
t.Fatalf("bad error for %+v: want %v, got %q", tt, nil, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigRoot(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
configRoot string
|
||||
}{
|
||||
{
|
||||
"/",
|
||||
"/",
|
||||
},
|
||||
{
|
||||
"/var/lib/waagent",
|
||||
"/var/lib/waagent",
|
||||
},
|
||||
} {
|
||||
a := waagent{tt.root, nil}
|
||||
if configRoot := a.ConfigRoot(); configRoot != tt.configRoot {
|
||||
t.Fatalf("bad config root for %q: want %q, got %q", tt, tt.configRoot, configRoot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDatasource(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
root string
|
||||
expectRoot string
|
||||
}{
|
||||
{
|
||||
root: "",
|
||||
expectRoot: "",
|
||||
},
|
||||
{
|
||||
root: "/var/lib/waagent",
|
||||
expectRoot: "/var/lib/waagent",
|
||||
},
|
||||
} {
|
||||
service := NewDatasource(tt.root)
|
||||
if service.root != tt.expectRoot {
|
||||
t.Fatalf("bad root (%q): want %q, got %q", tt.root, tt.expectRoot, service.root)
|
||||
}
|
||||
}
|
||||
}
|
82
vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go
generated
vendored
@ -1,82 +0,0 @@
|
||||
// Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when both cgo is supported and "-tags testcgo" is added to the go test
|
||||
// command line. This code should really only be in the dumpcgo_test.go file,
|
||||
// but unfortunately Go will not allow cgo in test files, so this is a
|
||||
// workaround to allow cgo types to be tested. This configuration is used
|
||||
// because spew itself does not require cgo to run even though it does handle
|
||||
// certain cgo types specially. Rather than forcing all clients to require cgo
|
||||
// and an external C compiler just to run the tests, this scheme makes them
|
||||
// optional.
|
||||
// +build cgo,testcgo
|
||||
|
||||
package testdata
|
||||
|
||||
/*
|
||||
#include <stdint.h>
|
||||
typedef unsigned char custom_uchar_t;
|
||||
|
||||
char *ncp = 0;
|
||||
char *cp = "test";
|
||||
char ca[6] = {'t', 'e', 's', 't', '2', '\0'};
|
||||
unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'};
|
||||
signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'};
|
||||
uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
|
||||
custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// GetCgoNullCharPointer returns a null char pointer via cgo. This is only
|
||||
// used for tests.
|
||||
func GetCgoNullCharPointer() interface{} {
|
||||
return C.ncp
|
||||
}
|
||||
|
||||
// GetCgoCharPointer returns a char pointer via cgo. This is only used for
|
||||
// tests.
|
||||
func GetCgoCharPointer() interface{} {
|
||||
return C.cp
|
||||
}
|
||||
|
||||
// GetCgoCharArray returns a char array via cgo and the array's len and cap.
|
||||
// This is only used for tests.
|
||||
func GetCgoCharArray() (interface{}, int, int) {
|
||||
return C.ca, len(C.ca), cap(C.ca)
|
||||
}
|
||||
|
||||
// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
|
||||
// array's len and cap. This is only used for tests.
|
||||
func GetCgoUnsignedCharArray() (interface{}, int, int) {
|
||||
return C.uca, len(C.uca), cap(C.uca)
|
||||
}
|
||||
|
||||
// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
|
||||
// and cap. This is only used for tests.
|
||||
func GetCgoSignedCharArray() (interface{}, int, int) {
|
||||
return C.sca, len(C.sca), cap(C.sca)
|
||||
}
|
||||
|
||||
// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
|
||||
// cap. This is only used for tests.
|
||||
func GetCgoUint8tArray() (interface{}, int, int) {
|
||||
return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
|
||||
}
|
||||
|
||||
// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
|
||||
// cgo and the array's len and cap. This is only used for tests.
|
||||
func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
|
||||
return C.tuca, len(C.tuca), cap(C.tuca)
|
||||
}
|
179
vendor/github.com/docker/distribution/registry/storage/cache/cachecheck/suite.go
generated
vendored
@ -1,179 +0,0 @@
|
||||
package cachecheck
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/storage/cache"
|
||||
)
|
||||
|
||||
// CheckBlobDescriptorCache takes a cache implementation through a common set
|
||||
// of operations. If adding new tests, please add them here so new
|
||||
// implementations get the benefit. This should be used for unit tests.
|
||||
func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) {
|
||||
ctx := context.Background()
|
||||
|
||||
checkBlobDescriptorCacheEmptyRepository(t, ctx, provider)
|
||||
checkBlobDescriptorCacheSetAndRead(t, ctx, provider)
|
||||
}
|
||||
|
||||
func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
|
||||
if _, err := provider.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown {
|
||||
t.Fatalf("expected unknown blob error with empty store: %v", err)
|
||||
}
|
||||
|
||||
cache, err := provider.RepositoryScoped("")
|
||||
if err == nil {
|
||||
t.Fatalf("expected an error when asking for invalid repo")
|
||||
}
|
||||
|
||||
cache, err = provider.RepositoryScoped("foo/bar")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting repository: %v", err)
|
||||
}
|
||||
|
||||
if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{
|
||||
Digest: "sha384:abc",
|
||||
Size: 10,
|
||||
MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat {
|
||||
t.Fatalf("expected error with invalid digest: %v", err)
|
||||
}
|
||||
|
||||
if err := cache.SetDescriptor(ctx, "sha384:abc", distribution.Descriptor{
|
||||
Digest: "",
|
||||
Size: 10,
|
||||
MediaType: "application/octet-stream"}); err == nil {
|
||||
t.Fatalf("expected error setting value on invalid descriptor")
|
||||
}
|
||||
|
||||
if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat {
|
||||
t.Fatalf("expected error checking for cache item with empty digest: %v", err)
|
||||
}
|
||||
|
||||
if _, err := cache.Stat(ctx, "sha384:abc"); err != distribution.ErrBlobUnknown {
|
||||
t.Fatalf("expected unknown blob error with empty repo: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
|
||||
localDigest := digest.Digest("sha384:abc")
|
||||
expected := distribution.Descriptor{
|
||||
Digest: "sha256:abc",
|
||||
Size: 10,
|
||||
MediaType: "application/octet-stream"}
|
||||
|
||||
cache, err := provider.RepositoryScoped("foo/bar")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting scoped cache: %v", err)
|
||||
}
|
||||
|
||||
if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
|
||||
t.Fatalf("error setting descriptor: %v", err)
|
||||
}
|
||||
|
||||
desc, err := cache.Stat(ctx, localDigest)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error statting fake2:abc: %v", err)
|
||||
}
|
||||
|
||||
if expected != desc {
|
||||
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
|
||||
}
|
||||
|
||||
// also check that we set the canonical key ("fake:abc")
|
||||
desc, err = cache.Stat(ctx, localDigest)
|
||||
if err != nil {
|
||||
t.Fatalf("descriptor not returned for canonical key: %v", err)
|
||||
}
|
||||
|
||||
if expected != desc {
|
||||
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
|
||||
}
|
||||
|
||||
// ensure that global gets extra descriptor mapping
|
||||
desc, err = provider.Stat(ctx, localDigest)
|
||||
if err != nil {
|
||||
t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc)
|
||||
}
|
||||
|
||||
if desc != expected {
|
||||
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
|
||||
}
|
||||
|
||||
// get at it through canonical descriptor
|
||||
desc, err = provider.Stat(ctx, expected.Digest)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error checking glboal descriptor: %v", err)
|
||||
}
|
||||
|
||||
if desc != expected {
|
||||
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
|
||||
}
|
||||
|
||||
// now, we set the repo local mediatype to something else and ensure it
|
||||
// doesn't get changed in the provider cache.
|
||||
expected.MediaType = "application/json"
|
||||
|
||||
if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
|
||||
t.Fatalf("unexpected error setting descriptor: %v", err)
|
||||
}
|
||||
|
||||
desc, err = cache.Stat(ctx, localDigest)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting descriptor: %v", err)
|
||||
}
|
||||
|
||||
if desc != expected {
|
||||
t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
|
||||
}
|
||||
|
||||
desc, err = provider.Stat(ctx, localDigest)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting global descriptor: %v", err)
|
||||
}
|
||||
|
||||
expected.MediaType = "application/octet-stream" // expect original mediatype in global
|
||||
|
||||
if desc != expected {
|
||||
t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func checkBlobDescriptorClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) {
|
||||
localDigest := digest.Digest("sha384:abc")
|
||||
expected := distribution.Descriptor{
|
||||
Digest: "sha256:abc",
|
||||
Size: 10,
|
||||
MediaType: "application/octet-stream"}
|
||||
|
||||
cache, err := provider.RepositoryScoped("foo/bar")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting scoped cache: %v", err)
|
||||
}
|
||||
|
||||
if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil {
|
||||
t.Fatalf("error setting descriptor: %v", err)
|
||||
}
|
||||
|
||||
desc, err := cache.Stat(ctx, localDigest)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error statting fake2:abc: %v", err)
|
||||
}
|
||||
|
||||
if expected != desc {
|
||||
t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc)
|
||||
}
|
||||
|
||||
err = cache.Clear(ctx, localDigest)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error deleting descriptor")
|
||||
}
|
||||
|
||||
nonExistantDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
|
||||
err = cache.Clear(ctx, nonExistantDigest)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error deleting unknown descriptor")
|
||||
}
|
||||
}
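For reference, a sketch of how an implementation opts into this shared suite; it assumes the in-memory provider that ships alongside these caches in the memory sub-package (the redis test further below follows the same one-liner pattern):

```go
package memory_test

import (
	"testing"

	"github.com/docker/distribution/registry/storage/cache/cachecheck"
	"github.com/docker/distribution/registry/storage/cache/memory"
)

// Any BlobDescriptorCacheProvider implementation can be validated by handing
// it to the shared behavioural suite.
func TestInMemoryBlobDescriptorCache(t *testing.T) {
	cachecheck.CheckBlobDescriptorCache(t, memory.NewInMemoryBlobDescriptorCacheProvider())
}
```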
|
268
vendor/github.com/docker/distribution/registry/storage/cache/redis/redis.go
generated
vendored
@ -1,268 +0,0 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/cache"
|
||||
"github.com/garyburd/redigo/redis"
|
||||
)
|
||||
|
||||
// redisBlobStatService provides an implementation of
|
||||
// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in
|
||||
// two parts. The first provide fast access to repository membership through a
|
||||
// redis set for each repo. The second is a redis hash keyed by the digest of
|
||||
// the layer, providing path, length and mediatype information. There is also
|
||||
// a per-repository redis hash of the blob descriptor, allowing override of
|
||||
// data. This is currently used to override the mediatype on a per-repository
|
||||
// basis.
|
||||
//
|
||||
// Note that there is no implied relationship between these two caches. The
|
||||
// layer may exist in one, both or none and the code must be written this way.
|
||||
type redisBlobDescriptorService struct {
|
||||
pool *redis.Pool
|
||||
|
||||
// TODO(stevvooe): We use a pool because we don't have great control over
|
||||
// the cache lifecycle to manage connections. A new connection is fetched
|
||||
// for each operation. Once we have better lifecycle management of the
|
||||
// request objects, we can change this to a connection.
|
||||
}
|
||||
|
||||
// NewRedisBlobDescriptorCacheProvider returns a new redis-based
|
||||
// BlobDescriptorCacheProvider using the provided redis connection pool.
|
||||
func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider {
|
||||
return &redisBlobDescriptorService{
|
||||
pool: pool,
|
||||
}
|
||||
}
|
||||
|
||||
// RepositoryScoped returns the scoped cache.
|
||||
func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
|
||||
if _, err := reference.ParseNamed(repo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &repositoryScopedRedisBlobDescriptorService{
|
||||
repo: repo,
|
||||
upstream: rbds,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Stat retrieves the descriptor data from the redis hash entry.
|
||||
func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
conn := rbds.pool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
return rbds.stat(ctx, conn, dgst)
|
||||
}
|
||||
|
||||
func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
conn := rbds.pool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
// Not atomic in redis <= 2.3
|
||||
reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if reply == 0 {
|
||||
return distribution.ErrBlobUnknown
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// stat provides an internal stat call that takes a connection parameter. This
|
||||
// allows some internal management of the connection scope.
|
||||
func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) {
|
||||
reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype"))
|
||||
if err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
// NOTE(stevvooe): The "size" field used to be "length". We treat a
|
||||
// missing "size" field here as an unknown blob, which causes a cache
|
||||
// miss, effectively migrating the field.
|
||||
if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil
|
||||
return distribution.Descriptor{}, distribution.ErrBlobUnknown
|
||||
}
|
||||
|
||||
var desc distribution.Descriptor
|
||||
if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
// SetDescriptor sets the descriptor data for the given digest using a redis
|
||||
// hash. A hash is used here since we may store unrelated fields about a layer
|
||||
// in the future.
|
||||
func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cache.ValidateDescriptor(desc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
conn := rbds.pool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
return rbds.setDescriptor(ctx, conn, dgst, desc)
|
||||
}
|
||||
|
||||
func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
|
||||
if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst),
|
||||
"digest", desc.Digest,
|
||||
"size", desc.Size); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Only set mediatype if not already set.
|
||||
if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst),
|
||||
"mediatype", desc.MediaType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
|
||||
return "blobs::" + dgst.String()
|
||||
}
|
||||
|
||||
type repositoryScopedRedisBlobDescriptorService struct {
|
||||
repo string
|
||||
upstream *redisBlobDescriptorService
|
||||
}
|
||||
|
||||
var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{}
|
||||
|
||||
// Stat ensures that the digest is a member of the specified repository and
|
||||
// forwards the descriptor request to the global blob store. If the media type
|
||||
// differs for the repository, we override it.
|
||||
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
conn := rsrbds.upstream.pool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
// Check membership to repository first
|
||||
member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
|
||||
if err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
if !member {
|
||||
return distribution.Descriptor{}, distribution.ErrBlobUnknown
|
||||
}
|
||||
|
||||
upstream, err := rsrbds.upstream.stat(ctx, conn, dgst)
|
||||
if err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
// We allow a per repository mediatype, let's look it up here.
|
||||
mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype"))
|
||||
if err != nil {
|
||||
return distribution.Descriptor{}, err
|
||||
}
|
||||
|
||||
if mediatype != "" {
|
||||
upstream.MediaType = mediatype
|
||||
}
|
||||
|
||||
return upstream, nil
|
||||
}
|
||||
|
||||
// Clear removes the descriptor from the cache and forwards to the upstream descriptor store
|
||||
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
conn := rsrbds.upstream.pool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
// Check membership to repository first
|
||||
member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !member {
|
||||
return distribution.ErrBlobUnknown
|
||||
}
|
||||
|
||||
return rsrbds.upstream.Clear(ctx, dgst)
|
||||
}
|
||||
|
||||
func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
|
||||
if err := dgst.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cache.ValidateDescriptor(desc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if dgst != desc.Digest {
|
||||
if dgst.Algorithm() == desc.Digest.Algorithm() {
|
||||
return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest)
|
||||
}
|
||||
}
|
||||
|
||||
conn := rsrbds.upstream.pool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
return rsrbds.setDescriptor(ctx, conn, dgst, desc)
|
||||
}
|
||||
|
||||
func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
|
||||
if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Override repository mediatype.
|
||||
if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Also set the values for the primary descriptor, if they differ by
|
||||
// algorithm (ie sha256 vs tarsum).
|
||||
if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
|
||||
if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
|
||||
return "repository::" + rsrbds.repo + "::blobs::" + dgst.String()
|
||||
}
|
||||
|
||||
func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string {
|
||||
return "repository::" + rsrbds.repo + "::blobs"
|
||||
}
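For reference, a minimal sketch of wiring this provider up with a redigo pool; the pool settings are assumed example values, and the test below does essentially the same thing against a live instance:

```go
package registrycache

import (
	"time"

	"github.com/docker/distribution/registry/storage/cache"
	rediscache "github.com/docker/distribution/registry/storage/cache/redis"
	"github.com/garyburd/redigo/redis"
)

// newDescriptorCache builds the redis-backed descriptor cache described above:
// a global "blobs::<digest>" hash plus a per-repository membership set and
// per-repository override hash.
func newDescriptorCache(addr string) cache.BlobDescriptorCacheProvider {
	pool := &redis.Pool{
		MaxIdle:     5,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", addr)
		},
	}
	return rediscache.NewRedisBlobDescriptorCacheProvider(pool)
}
```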
|
51
vendor/github.com/docker/distribution/registry/storage/cache/redis/redis_test.go
generated
vendored
@ -1,51 +0,0 @@
package redis

import (
	"flag"
	"os"
	"testing"
	"time"

	"github.com/docker/distribution/registry/storage/cache/cachecheck"
	"github.com/garyburd/redigo/redis"
)

var redisAddr string

func init() {
	flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis")
}

// TestRedisBlobDescriptorCacheProvider exercises a live redis instance using
// the cache implementation.
func TestRedisBlobDescriptorCacheProvider(t *testing.T) {
	if redisAddr == "" {
		// fall back to an environment variable
		redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR")
	}

	if redisAddr == "" {
		// skip if still not set
		t.Skip("please set -test.registry.storage.cache.redis.addr to test layer info cache against redis")
	}

	pool := &redis.Pool{
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", redisAddr)
		},
		MaxIdle:   1,
		MaxActive: 2,
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			_, err := c.Do("PING")
			return err
		},
		Wait: false, // if a connection is not available, proceed without cache.
	}

	// Clear the database
	if _, err := pool.Get().Do("FLUSHDB"); err != nil {
		t.Fatalf("unexpected error flushing redis db: %v", err)
	}

	cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool))
}
5
vendor/github.com/docker/docker/api/README.md
generated
vendored
@ -1,5 +0,0 @@
This directory contains code pertaining to the Docker API:

- Used by the docker client when communicating with the docker daemon

- Used by third party tools wishing to interface with the docker daemon
88
vendor/github.com/docker/docker/api/client/attach.go
generated
vendored
@ -1,88 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
)
|
||||
|
||||
// CmdAttach attaches to a running container.
|
||||
//
|
||||
// Usage: docker attach [OPTIONS] CONTAINER
|
||||
func (cli *DockerCli) CmdAttach(args ...string) error {
|
||||
cmd := Cli.Subcmd("attach", []string{"CONTAINER"}, Cli.DockerCommands["attach"].Description, true)
|
||||
noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
|
||||
proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process")
|
||||
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var c types.ContainerJSON
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !c.State.Running {
|
||||
return fmt.Errorf("You cannot attach to a stopped container, start it first")
|
||||
}
|
||||
|
||||
if c.State.Paused {
|
||||
return fmt.Errorf("You cannot attach to a paused container, unpause it first")
|
||||
}
|
||||
|
||||
if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.Config.Tty && cli.isTerminalOut {
|
||||
if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
|
||||
logrus.Debugf("Error monitoring TTY size: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
var in io.ReadCloser
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("stream", "1")
|
||||
if !*noStdin && c.Config.OpenStdin {
|
||||
v.Set("stdin", "1")
|
||||
in = cli.in
|
||||
}
|
||||
|
||||
v.Set("stdout", "1")
|
||||
v.Set("stderr", "1")
|
||||
|
||||
if *proxy && !c.Config.Tty {
|
||||
sigc := cli.forwardAllSignals(cmd.Arg(0))
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
||||
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), c.Config.Tty, in, cli.out, cli.err, nil, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, status, err := getExitCode(cli, cmd.Arg(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if status != 0 {
|
||||
return Cli.StatusError{StatusCode: status}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
685
vendor/github.com/docker/docker/api/client/build.go
generated
vendored
@ -1,685 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/graph/tags"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/httputils"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/pkg/progressreader"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/ulimit"
|
||||
"github.com/docker/docker/pkg/units"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
tarHeaderSize = 512
|
||||
)
|
||||
|
||||
// CmdBuild builds a new image from the source code at a given path.
|
||||
//
|
||||
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
|
||||
//
|
||||
// Usage: docker build [OPTIONS] PATH | URL | -
|
||||
func (cli *DockerCli) CmdBuild(args ...string) error {
|
||||
cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true)
|
||||
flTags := opts.NewListOpts(validateTag)
|
||||
cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format")
|
||||
suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
|
||||
noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
|
||||
rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
|
||||
forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
|
||||
pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
|
||||
dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
|
||||
flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
|
||||
flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
|
||||
flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
|
||||
flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
|
||||
flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
|
||||
flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
|
||||
flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
|
||||
flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
|
||||
flBuildArg := opts.NewListOpts(opts.ValidateEnv)
|
||||
cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables")
|
||||
isolation := cmd.String([]string{"-isolation"}, "", "Container isolation level")
|
||||
|
||||
ulimits := make(map[string]*ulimit.Ulimit)
|
||||
flUlimits := opts.NewUlimitOpt(&ulimits)
|
||||
cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")
|
||||
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
// For trusted pull on "FROM <image>" instruction.
|
||||
addTrustedFlags(cmd, true)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
context io.ReadCloser
|
||||
isRemote bool
|
||||
err error
|
||||
)
|
||||
|
||||
_, err = exec.LookPath("git")
|
||||
hasGit := err == nil
|
||||
|
||||
specifiedContext := cmd.Arg(0)
|
||||
|
||||
var (
|
||||
contextDir string
|
||||
tempDir string
|
||||
relDockerfile string
|
||||
)
|
||||
|
||||
switch {
|
||||
case specifiedContext == "-":
|
||||
tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
|
||||
case urlutil.IsGitURL(specifiedContext) && hasGit:
|
||||
tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
|
||||
case urlutil.IsURL(specifiedContext):
|
||||
tempDir, relDockerfile, err = getContextFromURL(cli.out, specifiedContext, *dockerfileName)
|
||||
default:
|
||||
contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to prepare context: %s", err)
|
||||
}
|
||||
|
||||
if tempDir != "" {
|
||||
defer os.RemoveAll(tempDir)
|
||||
contextDir = tempDir
|
||||
}
|
||||
|
||||
// Resolve the FROM lines in the Dockerfile to trusted digest references
|
||||
// using Notary. On a successful build, we must tag the resolved digests
|
||||
// to the original name specified in the Dockerfile.
|
||||
newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to process Dockerfile: %v", err)
|
||||
}
|
||||
defer newDockerfile.Close()
|
||||
|
||||
// And canonicalize dockerfile name to a platform-independent one
|
||||
relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
|
||||
}
|
||||
|
||||
f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
var excludes []string
|
||||
if err == nil {
|
||||
excludes, err = utils.ReadDockerIgnore(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil {
|
||||
return fmt.Errorf("Error checking context: '%s'.", err)
|
||||
}
|
||||
|
||||
// If .dockerignore mentions .dockerignore or the Dockerfile
|
||||
// then make sure we send both files over to the daemon
|
||||
// because Dockerfile is, obviously, needed no matter what, and
|
||||
// .dockerignore is needed to know if either one needs to be
|
||||
// removed. The daemon will remove them for us, if needed, after it
|
||||
// parses the Dockerfile. Ignore errors here, as they will have been
|
||||
// caught by ValidateContextDirectory above.
|
||||
var includes = []string{"."}
|
||||
keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
|
||||
keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
|
||||
if keepThem1 || keepThem2 {
|
||||
includes = append(includes, ".dockerignore", relDockerfile)
|
||||
}
|
||||
|
||||
context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
|
||||
Compression: archive.Uncompressed,
|
||||
ExcludePatterns: excludes,
|
||||
IncludeFiles: includes,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wrap the tar archive to replace the Dockerfile entry with the rewritten
|
||||
// Dockerfile which uses trusted pulls.
|
||||
context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)
|
||||
|
||||
// Setup an upload progress bar
|
||||
// FIXME: ProgressReader shouldn't be this annoying to use
|
||||
sf := streamformatter.NewStreamFormatter()
|
||||
var body io.Reader = progressreader.New(progressreader.Config{
|
||||
In: context,
|
||||
Out: cli.out,
|
||||
Formatter: sf,
|
||||
NewLines: true,
|
||||
ID: "",
|
||||
Action: "Sending build context to Docker daemon",
|
||||
})
|
||||
|
||||
var memory int64
|
||||
if *flMemoryString != "" {
|
||||
parsedMemory, err := units.RAMInBytes(*flMemoryString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
memory = parsedMemory
|
||||
}
|
||||
|
||||
var memorySwap int64
|
||||
if *flMemorySwap != "" {
|
||||
if *flMemorySwap == "-1" {
|
||||
memorySwap = -1
|
||||
} else {
|
||||
parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
memorySwap = parsedMemorySwap
|
||||
}
|
||||
}
|
||||
|
||||
// Send the build context
|
||||
v := url.Values{
|
||||
"t": flTags.GetAll(),
|
||||
}
|
||||
if *suppressOutput {
|
||||
v.Set("q", "1")
|
||||
}
|
||||
if isRemote {
|
||||
v.Set("remote", cmd.Arg(0))
|
||||
}
|
||||
if *noCache {
|
||||
v.Set("nocache", "1")
|
||||
}
|
||||
if *rm {
|
||||
v.Set("rm", "1")
|
||||
} else {
|
||||
v.Set("rm", "0")
|
||||
}
|
||||
|
||||
if *forceRm {
|
||||
v.Set("forcerm", "1")
|
||||
}
|
||||
|
||||
if *pull {
|
||||
v.Set("pull", "1")
|
||||
}
|
||||
|
||||
if !runconfig.IsolationLevel.IsDefault(runconfig.IsolationLevel(*isolation)) {
|
||||
v.Set("isolation", *isolation)
|
||||
}
|
||||
|
||||
v.Set("cpusetcpus", *flCPUSetCpus)
|
||||
v.Set("cpusetmems", *flCPUSetMems)
|
||||
v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
|
||||
v.Set("cpuquota", strconv.FormatInt(*flCPUQuota, 10))
|
||||
v.Set("cpuperiod", strconv.FormatInt(*flCPUPeriod, 10))
|
||||
v.Set("memory", strconv.FormatInt(memory, 10))
|
||||
v.Set("memswap", strconv.FormatInt(memorySwap, 10))
|
||||
v.Set("cgroupparent", *flCgroupParent)
|
||||
|
||||
v.Set("dockerfile", relDockerfile)
|
||||
|
||||
ulimitsVar := flUlimits.GetList()
|
||||
ulimitsJSON, err := json.Marshal(ulimitsVar)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set("ulimits", string(ulimitsJSON))
|
||||
|
||||
// collect all the build-time environment variables for the container
|
||||
buildArgs := runconfig.ConvertKVStringsToMap(flBuildArg.GetAll())
|
||||
buildArgsJSON, err := json.Marshal(buildArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set("buildargs", string(buildArgsJSON))
|
||||
|
||||
headers := http.Header(make(map[string][]string))
|
||||
buf, err := json.Marshal(cli.configFile.AuthConfigs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
|
||||
headers.Set("Content-Type", "application/tar")
|
||||
|
||||
sopts := &streamOpts{
|
||||
rawTerminal: true,
|
||||
in: body,
|
||||
out: cli.out,
|
||||
headers: headers,
|
||||
}
|
||||
|
||||
serverResp, err := cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
|
||||
|
||||
// Windows: show error message about modified file permissions.
|
||||
if runtime.GOOS == "windows" {
|
||||
h, err := httputils.ParseServerHeader(serverResp.header.Get("Server"))
|
||||
if err == nil {
|
||||
if h.OS != "windows" {
|
||||
fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if jerr, ok := err.(*jsonmessage.JSONError); ok {
|
||||
// If no error code is set, default to 1
|
||||
if jerr.Code == 0 {
|
||||
jerr.Code = 1
|
||||
}
|
||||
return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Since the build was successful, now we must tag any of the resolved
|
||||
// images from the above Dockerfile rewrite.
|
||||
for _, resolved := range resolvedTags {
|
||||
if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateTag checks if the given image name can be resolved.
|
||||
func validateTag(rawRepo string) (string, error) {
|
||||
repository, tag := parsers.ParseRepositoryTag(rawRepo)
|
||||
if err := registry.ValidateRepositoryName(repository); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(tag) == 0 {
|
||||
return rawRepo, nil
|
||||
}
|
||||
|
||||
if err := tags.ValidateTagName(tag); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return rawRepo, nil
|
||||
}
|
||||
|
||||
// isUNC returns true if the path is UNC (one starting \\). It always returns
|
||||
// false on Linux.
|
||||
func isUNC(path string) bool {
|
||||
return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`)
|
||||
}
|
||||
|
||||
// getDockerfileRelPath uses the given context directory for a `docker build`
|
||||
// and returns the absolute path to the context directory, the relative path of
|
||||
// the dockerfile in that context directory, and a nil error on success.
|
||||
func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) {
|
||||
if absContextDir, err = filepath.Abs(givenContextDir); err != nil {
|
||||
return "", "", fmt.Errorf("unable to get absolute context directory: %v", err)
|
||||
}
|
||||
|
||||
// The context dir might be a symbolic link, so follow it to the actual
|
||||
// target directory.
|
||||
//
|
||||
// FIXME. We use isUNC (always false on non-Windows platforms) to workaround
|
||||
// an issue in golang. On Windows, EvalSymLinks does not work on UNC file
|
||||
// paths (those starting with \\). This hack means that when using links
|
||||
// on UNC paths, they will not be followed.
|
||||
if !isUNC(absContextDir) {
|
||||
absContextDir, err = filepath.EvalSymlinks(absContextDir)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(absContextDir)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err)
|
||||
}
|
||||
|
||||
if !stat.IsDir() {
|
||||
return "", "", fmt.Errorf("context must be a directory: %s", absContextDir)
|
||||
}
|
||||
|
||||
absDockerfile := givenDockerfile
|
||||
if absDockerfile == "" {
|
||||
// No -f/--file was specified so use the default relative to the
|
||||
// context directory.
|
||||
absDockerfile = filepath.Join(absContextDir, api.DefaultDockerfileName)
|
||||
|
||||
// Just to be nice ;-) look for 'dockerfile' too but only
|
||||
// use it if we found it, otherwise ignore this check
|
||||
if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) {
|
||||
altPath := filepath.Join(absContextDir, strings.ToLower(api.DefaultDockerfileName))
|
||||
if _, err = os.Lstat(altPath); err == nil {
|
||||
absDockerfile = altPath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If not already an absolute path, the Dockerfile path should be joined to
|
||||
// the base directory.
|
||||
if !filepath.IsAbs(absDockerfile) {
|
||||
absDockerfile = filepath.Join(absContextDir, absDockerfile)
|
||||
}
|
||||
|
||||
// Evaluate symlinks in the path to the Dockerfile too.
|
||||
//
|
||||
// FIXME. We use isUNC (always false on non-Windows platforms) to workaround
|
||||
// an issue in golang. On Windows, EvalSymLinks does not work on UNC file
|
||||
// paths (those starting with \\). This hack means that when using links
|
||||
// on UNC paths, they will not be followed.
|
||||
if !isUNC(absDockerfile) {
|
||||
absDockerfile, err = filepath.EvalSymlinks(absDockerfile)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Lstat(absDockerfile); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile)
|
||||
}
|
||||
return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err)
|
||||
}
|
||||
|
||||
if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil {
|
||||
return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
|
||||
return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir)
|
||||
}
|
||||
|
||||
return absContextDir, relDockerfile, nil
|
||||
}
|
||||
|
||||
// writeToFile copies from the given reader and writes it to a file with the
|
||||
// given filename.
|
||||
func writeToFile(r io.Reader, filename string) error {
|
||||
file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create file: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if _, err := io.Copy(file, r); err != nil {
|
||||
return fmt.Errorf("unable to write file: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getContextFromReader will read the contents of the given reader as either a
|
||||
// Dockerfile or tar archive to be extracted to a temporary directory used as
|
||||
// the context directory. Returns the absolute path to the temporary context
|
||||
// directory, the relative path of the dockerfile in that context directory,
|
||||
// and a nil error on success.
|
||||
func getContextFromReader(r io.Reader, dockerfileName string) (absContextDir, relDockerfile string, err error) {
|
||||
buf := bufio.NewReader(r)
|
||||
|
||||
magic, err := buf.Peek(tarHeaderSize)
|
||||
if err != nil && err != io.EOF {
|
||||
return "", "", fmt.Errorf("failed to peek context header from STDIN: %v", err)
|
||||
}
|
||||
|
||||
if absContextDir, err = ioutil.TempDir("", "docker-build-context-"); err != nil {
|
||||
return "", "", fmt.Errorf("unbale to create temporary context directory: %v", err)
|
||||
}
|
||||
|
||||
defer func(d string) {
|
||||
if err != nil {
|
||||
os.RemoveAll(d)
|
||||
}
|
||||
}(absContextDir)
|
||||
|
||||
if !archive.IsArchive(magic) { // Input should be read as a Dockerfile.
|
||||
// -f option has no meaning when we're reading it from stdin,
|
||||
// so just use our default Dockerfile name
|
||||
relDockerfile = api.DefaultDockerfileName
|
||||
|
||||
return absContextDir, relDockerfile, writeToFile(buf, filepath.Join(absContextDir, relDockerfile))
|
||||
}
|
||||
|
||||
if err := archive.Untar(buf, absContextDir, nil); err != nil {
|
||||
return "", "", fmt.Errorf("unable to extract stdin to temporary context directory: %v", err)
|
||||
}
|
||||
|
||||
return getDockerfileRelPath(absContextDir, dockerfileName)
|
||||
}
|
||||
|
||||
// getContextFromGitURL uses a Git URL as context for a `docker build`. The
|
||||
// git repo is cloned into a temporary directory used as the context directory.
|
||||
// Returns the absolute path to the temporary context directory, the relative
|
||||
// path of the dockerfile in that context directory, and a nil error on
|
||||
// success.
|
||||
func getContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
|
||||
if absContextDir, err = utils.GitClone(gitURL); err != nil {
|
||||
return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err)
|
||||
}
|
||||
|
||||
return getDockerfileRelPath(absContextDir, dockerfileName)
|
||||
}
|
||||
|
||||
// getContextFromURL uses a remote URL as context for a `docker build`. The
|
||||
// remote resource is downloaded as either a Dockerfile or a context tar
|
||||
// archive and stored in a temporary directory used as the context directory.
|
||||
// Returns the absolute path to the temporary context directory, the relative
|
||||
// path of the dockerfile in that context directory, and a nil error on
|
||||
// success.
|
||||
func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
|
||||
response, err := httputils.Download(remoteURL)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err)
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
// Pass the response body through a progress reader.
|
||||
progReader := &progressreader.Config{
|
||||
In: response.Body,
|
||||
Out: out,
|
||||
Formatter: streamformatter.NewStreamFormatter(),
|
||||
Size: response.ContentLength,
|
||||
NewLines: true,
|
||||
ID: "",
|
||||
Action: fmt.Sprintf("Downloading build context from remote url: %s", remoteURL),
|
||||
}
|
||||
|
||||
return getContextFromReader(progReader, dockerfileName)
|
||||
}
|
||||
|
||||
// getContextFromLocalDir uses the given local directory as context for a
|
||||
// `docker build`. Returns the absolute path to the local context directory,
|
||||
// the relative path of the dockerfile in that context directory, and a nil
|
||||
// error on success.
|
||||
func getContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) {
|
||||
// When using a local context directory, when the Dockerfile is specified
|
||||
// with the `-f/--file` option then it is considered relative to the
|
||||
// current directory and not the context directory.
|
||||
if dockerfileName != "" {
|
||||
if dockerfileName, err = filepath.Abs(dockerfileName); err != nil {
|
||||
return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return getDockerfileRelPath(localDir, dockerfileName)
|
||||
}
|
||||
|
||||
var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)
|
||||
|
||||
type trustedDockerfile struct {
|
||||
*os.File
|
||||
size int64
|
||||
}
|
||||
|
||||
func (td *trustedDockerfile) Close() error {
|
||||
td.File.Close()
|
||||
return os.Remove(td.File.Name())
|
||||
}
|
||||
|
||||

// resolvedTag records the repository, tag, and resolved digest reference
// from a Dockerfile rewrite.
type resolvedTag struct {
	repoInfo          *registry.RepositoryInfo
	digestRef, tagRef registry.Reference
}

// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
// "FROM <image>" instructions to a digest reference. `translator` is a
// function that takes a repository name and tag reference and returns a
// trusted digest reference.
func rewriteDockerfileFrom(dockerfileName string, translator func(string, registry.Reference) (registry.Reference, error)) (newDockerfile *trustedDockerfile, resolvedTags []*resolvedTag, err error) {
	dockerfile, err := os.Open(dockerfileName)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to open Dockerfile: %v", err)
	}
	defer dockerfile.Close()

	scanner := bufio.NewScanner(dockerfile)

	// Make a tempfile to store the rewritten Dockerfile.
	tempFile, err := ioutil.TempFile("", "trusted-dockerfile-")
	if err != nil {
		return nil, nil, fmt.Errorf("unable to make temporary trusted Dockerfile: %v", err)
	}

	trustedFile := &trustedDockerfile{
		File: tempFile,
	}

	defer func() {
		if err != nil {
			// Close the tempfile if there was an error during Notary lookups.
			// Otherwise the caller should close it.
			trustedFile.Close()
		}
	}()

	// Scan the lines of the Dockerfile, looking for a "FROM" line.
	for scanner.Scan() {
		line := scanner.Text()

		matches := dockerfileFromLinePattern.FindStringSubmatch(line)
		if matches != nil && matches[1] != "scratch" {
			// Replace the line with a resolved "FROM repo@digest"
			repo, tag := parsers.ParseRepositoryTag(matches[1])
			if tag == "" {
				tag = tags.DefaultTag
			}

			repoInfo, err := registry.ParseRepositoryInfo(repo)
			if err != nil {
				return nil, nil, fmt.Errorf("unable to parse repository info %q: %v", repo, err)
			}

			ref := registry.ParseReference(tag)

			if !ref.HasDigest() && isTrusted() {
				trustedRef, err := translator(repo, ref)
				if err != nil {
					return nil, nil, err
				}

				line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.ImageName(repo)))
				resolvedTags = append(resolvedTags, &resolvedTag{
					repoInfo:  repoInfo,
					digestRef: trustedRef,
					tagRef:    ref,
				})
			}
		}

		n, err := fmt.Fprintln(tempFile, line)
		if err != nil {
			return nil, nil, err
		}

		trustedFile.size += int64(n)
	}

	tempFile.Seek(0, os.SEEK_SET)

	return trustedFile, resolvedTags, scanner.Err()
}
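The rewrite above hinges on dockerfileFromLinePattern and ReplaceAllLiteralString. As a point of reference, here is a minimal standalone sketch of that behavior; the digest is a made-up placeholder and the program is not part of the vendored file:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the vendored dockerfileFromLinePattern.
var fromLine = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)

func main() {
	line := "FROM golang:1.4 # builder stage"

	// The capture group holds the image reference ("golang:1.4").
	matches := fromLine.FindStringSubmatch(line)
	fmt.Println("image:", matches[1])

	// A translator would resolve the tag to a digest; here we use a
	// hard-coded placeholder digest purely for illustration.
	resolved := "golang@sha256:0000000000000000000000000000000000000000000000000000000000000000"
	fmt.Println(fromLine.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", resolved)))
}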

// replaceDockerfileTarWrapper wraps the given input tar archive stream and
// replaces the entry with the given Dockerfile name with the contents of the
// new Dockerfile. Returns a new tar archive stream with the replaced
// Dockerfile.
func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, newDockerfile *trustedDockerfile, dockerfileName string) io.ReadCloser {
	pipeReader, pipeWriter := io.Pipe()

	go func() {
		tarReader := tar.NewReader(inputTarStream)
		tarWriter := tar.NewWriter(pipeWriter)

		defer inputTarStream.Close()

		for {
			hdr, err := tarReader.Next()
			if err == io.EOF {
				// Signals end of archive.
				tarWriter.Close()
				pipeWriter.Close()
				return
			}
			if err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			var content io.Reader = tarReader

			if hdr.Name == dockerfileName {
				// This entry is the Dockerfile. Since the tar archive was
				// generated from a directory on the local filesystem, the
				// Dockerfile will only appear once in the archive.
				hdr.Size = newDockerfile.size
				content = newDockerfile
			}

			if err := tarWriter.WriteHeader(hdr); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}

			if _, err := io.Copy(tarWriter, content); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
		}
	}()

	return pipeReader
}
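The wrapper above streams the archive through an io.Pipe so the rewritten Dockerfile can be substituted without buffering the whole build context. A minimal standalone sketch of the same pipe-and-goroutine pattern, assuming a one-entry in-memory archive and a placeholder digest:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// replaceEntry streams src through a pipe, swapping the body of the entry
// named `name` for `replacement`, mirroring the structure used above.
func replaceEntry(src io.Reader, name, replacement string) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tr := tar.NewReader(src)
		tw := tar.NewWriter(pw)
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				tw.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(err)
				return
			}
			var content io.Reader = tr
			if hdr.Name == name {
				hdr.Size = int64(len(replacement))
				content = strings.NewReader(replacement)
			}
			if err := tw.WriteHeader(hdr); err != nil {
				pw.CloseWithError(err)
				return
			}
			if _, err := io.Copy(tw, content); err != nil {
				pw.CloseWithError(err)
				return
			}
		}
	}()
	return pr
}

func main() {
	// Build a tiny archive with a single Dockerfile entry.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len("FROM busybox\n"))})
	io.WriteString(tw, "FROM busybox\n")
	tw.Close()

	// Placeholder digest, for illustration only.
	out := replaceEntry(&buf, "Dockerfile", "FROM busybox@sha256:placeholder\n")
	tr := tar.NewReader(out)
	hdr, _ := tr.Next()
	body, _ := ioutil.ReadAll(tr)
	fmt.Printf("%s: %s", hdr.Name, body)
}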
169 vendor/github.com/docker/docker/api/client/cli.go (generated, vendored)
@@ -1,169 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/cliconfig"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/sockets"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/docker/docker/pkg/tlsconfig"
|
||||
)
|
||||
|
||||
// DockerCli represents the docker command line client.
|
||||
// Instances of the client can be returned from NewDockerCli.
|
||||
type DockerCli struct {
|
||||
// initializing closure
|
||||
init func() error
|
||||
|
||||
// proto holds the client protocol i.e. unix.
|
||||
proto string
|
||||
// addr holds the client address.
|
||||
addr string
|
||||
// basePath holds the path to prepend to the requests
|
||||
basePath string
|
||||
|
||||
// configFile has the client configuration file
|
||||
configFile *cliconfig.ConfigFile
|
||||
// in holds the input stream and closer (io.ReadCloser) for the client.
|
||||
in io.ReadCloser
|
||||
// out holds the output stream (io.Writer) for the client.
|
||||
out io.Writer
|
||||
// err holds the error stream (io.Writer) for the client.
|
||||
err io.Writer
|
||||
// keyFile holds the key file as a string.
|
||||
keyFile string
|
||||
// tlsConfig holds the TLS configuration for the client, and will
|
||||
// set the scheme to https in NewDockerCli if present.
|
||||
tlsConfig *tls.Config
|
||||
// scheme holds the scheme of the client i.e. https.
|
||||
scheme string
|
||||
// inFd holds the file descriptor of the client's STDIN (if valid).
|
||||
inFd uintptr
|
||||
// outFd holds file descriptor of the client's STDOUT (if valid).
|
||||
outFd uintptr
|
||||
// isTerminalIn indicates whether the client's STDIN is a TTY
|
||||
isTerminalIn bool
|
||||
// isTerminalOut indicates whether the client's STDOUT is a TTY
|
||||
isTerminalOut bool
|
||||
// transport holds the client transport instance.
|
||||
transport *http.Transport
|
||||
}
|
||||
|
||||
// Initialize calls the init function that will setup the configuration for the client
|
||||
// such as the TLS, tcp and other parameters used to run the client.
|
||||
func (cli *DockerCli) Initialize() error {
|
||||
if cli.init == nil {
|
||||
return nil
|
||||
}
|
||||
return cli.init()
|
||||
}
|
||||
|
||||
// CheckTtyInput checks if we are trying to attach to a container tty
|
||||
// from a non-tty client input stream, and if so, returns an error.
|
||||
func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error {
|
||||
// In order to attach to a container tty, input stream for the client must
|
||||
// be a tty itself: redirecting or piping the client standard input is
|
||||
// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
|
||||
if ttyMode && attachStdin && !cli.isTerminalIn {
|
||||
return errors.New("cannot enable tty mode on non tty input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PsFormat returns the format string specified in the configuration.
|
||||
// String contains columns and format specification, for example {{ID}\t{{Name}}.
|
||||
func (cli *DockerCli) PsFormat() string {
|
||||
return cli.configFile.PsFormat
|
||||
}
|
||||
|
||||
// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
|
||||
// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config
|
||||
// is set the client scheme will be set to https.
|
||||
// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035).
|
||||
func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli {
|
||||
cli := &DockerCli{
|
||||
in: in,
|
||||
out: out,
|
||||
err: err,
|
||||
keyFile: clientFlags.Common.TrustKey,
|
||||
}
|
||||
|
||||
cli.init = func() error {
|
||||
|
||||
clientFlags.PostParse()
|
||||
|
||||
hosts := clientFlags.Common.Hosts
|
||||
|
||||
switch len(hosts) {
|
||||
case 0:
|
||||
hosts = []string{os.Getenv("DOCKER_HOST")}
|
||||
case 1:
|
||||
// only accept one host to talk to
|
||||
default:
|
||||
return errors.New("Please specify only one -H")
|
||||
}
|
||||
|
||||
defaultHost := opts.DefaultTCPHost
|
||||
if clientFlags.Common.TLSOptions != nil {
|
||||
defaultHost = opts.DefaultTLSHost
|
||||
}
|
||||
|
||||
var e error
|
||||
if hosts[0], e = opts.ParseHost(defaultHost, hosts[0]); e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
protoAddrParts := strings.SplitN(hosts[0], "://", 2)
|
||||
cli.proto, cli.addr = protoAddrParts[0], protoAddrParts[1]
|
||||
|
||||
if cli.proto == "tcp" {
|
||||
// error is checked in pkg/parsers already
|
||||
parsed, _ := url.Parse("tcp://" + cli.addr)
|
||||
cli.addr = parsed.Host
|
||||
cli.basePath = parsed.Path
|
||||
}
|
||||
|
||||
if clientFlags.Common.TLSOptions != nil {
|
||||
cli.scheme = "https"
|
||||
var e error
|
||||
cli.tlsConfig, e = tlsconfig.Client(*clientFlags.Common.TLSOptions)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
} else {
|
||||
cli.scheme = "http"
|
||||
}
|
||||
|
||||
if cli.in != nil {
|
||||
cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in)
|
||||
}
|
||||
if cli.out != nil {
|
||||
cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out)
|
||||
}
|
||||
|
||||
// The transport is created here for reuse during the client session.
|
||||
cli.transport = &http.Transport{
|
||||
TLSClientConfig: cli.tlsConfig,
|
||||
}
|
||||
sockets.ConfigureTCPTransport(cli.transport, cli.proto, cli.addr)
|
||||
|
||||
configFile, e := cliconfig.Load(cliconfig.ConfigDir())
|
||||
if e != nil {
|
||||
fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e)
|
||||
}
|
||||
cli.configFile = configFile
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return cli
|
||||
}
|
5 vendor/github.com/docker/docker/api/client/client.go (generated, vendored)
@@ -1,5 +0,0 @@
|
||||
// Package client provides a command-line interface for Docker.
|
||||
//
|
||||
// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand.
|
||||
// See https://docs.docker.com/installation/ for instructions on installing Docker.
|
||||
package client
|
84 vendor/github.com/docker/docker/api/client/commit.go (generated, vendored)
@@ -1,84 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/opts"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/docker/runconfig"
|
||||
)
|
||||
|
||||
// CmdCommit creates a new image from a container's changes.
|
||||
//
|
||||
// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
|
||||
func (cli *DockerCli) CmdCommit(args ...string) error {
|
||||
cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, Cli.DockerCommands["commit"].Description, true)
|
||||
flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
|
||||
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
|
||||
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
|
||||
flChanges := opts.NewListOpts(nil)
|
||||
cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
|
||||
// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
|
||||
flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
|
||||
cmd.Require(flag.Max, 2)
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
name = cmd.Arg(0)
|
||||
repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
|
||||
)
|
||||
|
||||
//Check if the given image name can be resolved
|
||||
if repository != "" {
|
||||
if err := registry.ValidateRepositoryName(repository); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("container", name)
|
||||
v.Set("repo", repository)
|
||||
v.Set("tag", tag)
|
||||
v.Set("comment", *flComment)
|
||||
v.Set("author", *flAuthor)
|
||||
for _, change := range flChanges.GetAll() {
|
||||
v.Add("changes", change)
|
||||
}
|
||||
|
||||
if *flPause != true {
|
||||
v.Set("pause", "0")
|
||||
}
|
||||
|
||||
var (
|
||||
config *runconfig.Config
|
||||
response types.ContainerCommitResponse
|
||||
)
|
||||
|
||||
if *flConfig != "" {
|
||||
config = &runconfig.Config{}
|
||||
if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
serverResp, err := cli.call("POST", "/commit?"+v.Encode(), config, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprintln(cli.out, response.ID)
|
||||
return nil
|
||||
}
|
325 vendor/github.com/docker/docker/api/client/cp.go (generated, vendored)
@@ -1,325 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
type copyDirection int
|
||||
|
||||
const (
|
||||
fromContainer copyDirection = (1 << iota)
|
||||
toContainer
|
||||
acrossContainers = fromContainer | toContainer
|
||||
)
|
||||
|
||||
// CmdCp copies files/folders to or from a path in a container.
|
||||
//
|
||||
// When copying from a container, if LOCALPATH is '-' the data is written as a
|
||||
// tar archive file to STDOUT.
|
||||
//
|
||||
// When copying to a container, if LOCALPATH is '-' the data is read as a tar
|
||||
// archive file from STDIN, and the destination CONTAINER:PATH, must specify
|
||||
// a directory.
|
||||
//
|
||||
// Usage:
|
||||
// docker cp CONTAINER:PATH LOCALPATH|-
|
||||
// docker cp LOCALPATH|- CONTAINER:PATH
|
||||
func (cli *DockerCli) CmdCp(args ...string) error {
|
||||
cmd := Cli.Subcmd(
|
||||
"cp",
|
||||
[]string{"CONTAINER:PATH LOCALPATH|-", "LOCALPATH|- CONTAINER:PATH"},
|
||||
strings.Join([]string{
|
||||
Cli.DockerCommands["cp"].Description,
|
||||
"\nUse '-' as the source to read a tar archive from stdin\n",
|
||||
"and extract it to a directory destination in a container.\n",
|
||||
"Use '-' as the destination to stream a tar archive of a\n",
|
||||
"container source to stdout.",
|
||||
}, ""),
|
||||
true,
|
||||
)
|
||||
|
||||
cmd.Require(flag.Exact, 2)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
if cmd.Arg(0) == "" {
|
||||
return fmt.Errorf("source can not be empty")
|
||||
}
|
||||
if cmd.Arg(1) == "" {
|
||||
return fmt.Errorf("destination can not be empty")
|
||||
}
|
||||
|
||||
srcContainer, srcPath := splitCpArg(cmd.Arg(0))
|
||||
dstContainer, dstPath := splitCpArg(cmd.Arg(1))
|
||||
|
||||
var direction copyDirection
|
||||
if srcContainer != "" {
|
||||
direction |= fromContainer
|
||||
}
|
||||
if dstContainer != "" {
|
||||
direction |= toContainer
|
||||
}
|
||||
|
||||
switch direction {
|
||||
case fromContainer:
|
||||
return cli.copyFromContainer(srcContainer, srcPath, dstPath)
|
||||
case toContainer:
|
||||
return cli.copyToContainer(srcPath, dstContainer, dstPath)
|
||||
case acrossContainers:
|
||||
// Copying between containers isn't supported.
|
||||
return fmt.Errorf("copying between containers is not supported")
|
||||
default:
|
||||
// User didn't specify any container.
|
||||
return fmt.Errorf("must specify at least one container source")
|
||||
}
|
||||
}
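The direction handling above is a two-bit mask: each side that names a container sets one bit, and the switch keys on the three meaningful combinations. A standalone sketch with hypothetical argument values:

package main

import "fmt"

type copyDirection int

const (
	fromContainer copyDirection = 1 << iota
	toContainer
	acrossContainers = fromContainer | toContainer
)

// direction mirrors the bit-setting logic in CmdCp above.
func direction(srcContainer, dstContainer string) copyDirection {
	var d copyDirection
	if srcContainer != "" {
		d |= fromContainer
	}
	if dstContainer != "" {
		d |= toContainer
	}
	return d
}

func main() {
	fmt.Println(direction("web", "") == fromContainer)   // true: copy out of "web"
	fmt.Println(direction("", "web") == toContainer)     // true: copy into "web"
	fmt.Println(direction("a", "b") == acrossContainers) // true: rejected by the CLI
	fmt.Println(direction("", "") == 0)                  // true: no container named
}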
|
||||
|
||||
// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be
|
||||
// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by
|
||||
// requiring a LOCALPATH with a `:` to be made explicit with a relative or
|
||||
// absolute path:
|
||||
// `/path/to/file:name.txt` or `./file:name.txt`
|
||||
//
|
||||
// This is apparently how `scp` handles this as well:
|
||||
// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/
|
||||
//
|
||||
// We can't simply check for a filepath separator because container names may
|
||||
// have a separator, e.g., "host0/cname1" if container is in a Docker cluster,
|
||||
// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows
|
||||
// client, a `:` could be part of an absolute Windows path, in which case it
|
||||
// is immediately followed by a backslash.
|
||||
func splitCpArg(arg string) (container, path string) {
|
||||
if system.IsAbs(arg) {
|
||||
// Explicit local absolute path, e.g., `C:\foo` or `/foo`.
|
||||
return "", arg
|
||||
}
|
||||
|
||||
parts := strings.SplitN(arg, ":", 2)
|
||||
|
||||
if len(parts) == 1 || strings.HasPrefix(parts[0], ".") {
|
||||
// Either there's no `:` in the arg
|
||||
// OR it's an explicit local relative path like `./file:name.txt`.
|
||||
return "", arg
|
||||
}
|
||||
|
||||
return parts[0], parts[1]
|
||||
}
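The splitting rules described in the comment above are easiest to verify against concrete arguments. A simplified standalone sketch, assuming filepath.IsAbs in place of the vendored system.IsAbs (so Windows drive paths are only detected when run on Windows):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func splitCpArg(arg string) (container, path string) {
	if filepath.IsAbs(arg) {
		return "", arg // explicit local absolute path
	}
	parts := strings.SplitN(arg, ":", 2)
	if len(parts) == 1 || strings.HasPrefix(parts[0], ".") {
		return "", arg // no ':' at all, or explicit relative path like ./file:name.txt
	}
	return parts[0], parts[1]
}

func main() {
	for _, arg := range []string{"mycontainer:/etc/hosts", "./file:name.txt", "/tmp/file:name.txt", "plain.txt"} {
		c, p := splitCpArg(arg)
		fmt.Printf("%-25s -> container=%q path=%q\n", arg, c, p)
	}
}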
|
||||
|
||||
func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) {
|
||||
var stat types.ContainerPathStat
|
||||
|
||||
query := make(url.Values, 1)
|
||||
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
|
||||
|
||||
urlStr := fmt.Sprintf("/containers/%s/archive?%s", containerName, query.Encode())
|
||||
|
||||
response, err := cli.call("HEAD", urlStr, nil, nil)
|
||||
if err != nil {
|
||||
return stat, err
|
||||
}
|
||||
defer response.body.Close()
|
||||
|
||||
if response.statusCode != http.StatusOK {
|
||||
return stat, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
|
||||
}
|
||||
|
||||
return getContainerPathStatFromHeader(response.header)
|
||||
}
|
||||
|
||||
func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
|
||||
var stat types.ContainerPathStat
|
||||
|
||||
encodedStat := header.Get("X-Docker-Container-Path-Stat")
|
||||
statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
|
||||
|
||||
err := json.NewDecoder(statDecoder).Decode(&stat)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("unable to decode container path stat header: %s", err)
|
||||
}
|
||||
|
||||
return stat, err
|
||||
}
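The stat header is plain JSON wrapped in base64. A standalone round-trip sketch, using a trimmed hypothetical stand-in for types.ContainerPathStat:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// pathStat is a trimmed stand-in for the real types.ContainerPathStat.
type pathStat struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

func main() {
	// Encode, as the daemon side would.
	raw, _ := json.Marshal(pathStat{Name: "hosts", Size: 174})
	header := http.Header{}
	header.Set("X-Docker-Container-Path-Stat", base64.StdEncoding.EncodeToString(raw))

	// Decode, mirroring getContainerPathStatFromHeader above.
	var stat pathStat
	dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(header.Get("X-Docker-Container-Path-Stat")))
	if err := json.NewDecoder(dec).Decode(&stat); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%+v\n", stat)
}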
|
||||
|
||||
func resolveLocalPath(localPath string) (absPath string, err error) {
|
||||
if absPath, err = filepath.Abs(localPath); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string) (err error) {
|
||||
if dstPath != "-" {
|
||||
// Get an absolute destination path.
|
||||
dstPath, err = resolveLocalPath(dstPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
query := make(url.Values, 1)
|
||||
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
|
||||
|
||||
urlStr := fmt.Sprintf("/containers/%s/archive?%s", srcContainer, query.Encode())
|
||||
|
||||
response, err := cli.call("GET", urlStr, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer response.body.Close()
|
||||
|
||||
if response.statusCode != http.StatusOK {
|
||||
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
|
||||
}
|
||||
|
||||
if dstPath == "-" {
|
||||
// Send the response to STDOUT.
|
||||
_, err = io.Copy(os.Stdout, response.body)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// In order to get the copy behavior right, we need to know information
|
||||
// about both the source and the destination. The response headers include
|
||||
// stat info about the source that we can use in deciding exactly how to
|
||||
// copy it locally. Along with the stat info about the local destination,
|
||||
// we have everything we need to handle the multiple possibilities there
|
||||
// can be when copying a file/dir from one location to another file/dir.
|
||||
stat, err := getContainerPathStatFromHeader(response.header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get resource stat from response: %s", err)
|
||||
}
|
||||
|
||||
// Prepare source copy info.
|
||||
srcInfo := archive.CopyInfo{
|
||||
Path: srcPath,
|
||||
Exists: true,
|
||||
IsDir: stat.Mode.IsDir(),
|
||||
}
|
||||
|
||||
// See comments in the implementation of `archive.CopyTo` for exactly what
|
||||
// goes into deciding how and whether the source archive needs to be
|
||||
// altered for the correct copy behavior.
|
||||
return archive.CopyTo(response.body, srcInfo, dstPath)
|
||||
}
|
||||
|
||||
func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (err error) {
|
||||
if srcPath != "-" {
|
||||
// Get an absolute source path.
|
||||
srcPath, err = resolveLocalPath(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// In order to get the copy behavior right, we need to know information
|
||||
// about both the source and destination. The API is a simple tar
|
||||
// archive/extract API but we can use the stat info header about the
|
||||
// destination to be more informed about exactly what the destination is.
|
||||
|
||||
// Prepare destination copy info by stat-ing the container path.
|
||||
dstInfo := archive.CopyInfo{Path: dstPath}
|
||||
dstStat, err := cli.statContainerPath(dstContainer, dstPath)
|
||||
|
||||
// If the destination is a symbolic link, we should evaluate it.
|
||||
if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
|
||||
linkTarget := dstStat.LinkTarget
|
||||
if !system.IsAbs(linkTarget) {
|
||||
// Join with the parent directory.
|
||||
dstParent, _ := archive.SplitPathDirEntry(dstPath)
|
||||
linkTarget = filepath.Join(dstParent, linkTarget)
|
||||
}
|
||||
|
||||
dstInfo.Path = linkTarget
|
||||
dstStat, err = cli.statContainerPath(dstContainer, linkTarget)
|
||||
}
|
||||
|
||||
// Ignore any error and assume that the parent directory of the destination
|
||||
// path exists, in which case the copy may still succeed. If there is any
|
||||
// type of conflict (e.g., non-directory overwriting an existing directory
|
||||
// or vice versa) the extraction will fail. If the destination simply did
|
||||
// not exist, but the parent directory does, the extraction will still
|
||||
// succeed.
|
||||
if err == nil {
|
||||
dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
|
||||
}
|
||||
|
||||
var (
|
||||
content io.Reader
|
||||
resolvedDstPath string
|
||||
)
|
||||
|
||||
if srcPath == "-" {
|
||||
// Use STDIN.
|
||||
content = os.Stdin
|
||||
resolvedDstPath = dstInfo.Path
|
||||
if !dstInfo.IsDir {
|
||||
return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
|
||||
}
|
||||
} else {
|
||||
// Prepare source copy info.
|
||||
srcInfo, err := archive.CopyInfoSourcePath(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcArchive, err := archive.TarResource(srcInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer srcArchive.Close()
|
||||
|
||||
// With the stat info about the local source as well as the
|
||||
// destination, we have enough information to know whether we need to
|
||||
// alter the archive that we upload so that when the server extracts
|
||||
// it to the specified directory in the container we get the desired
|
||||
// copy behavior.
|
||||
|
||||
// See comments in the implementation of `archive.PrepareArchiveCopy`
|
||||
// for exactly what goes into deciding how and whether the source
|
||||
// archive needs to be altered for the correct copy behavior when it is
|
||||
// extracted. This function also infers from the source and destination
|
||||
// info which directory to extract to, which may be the parent of the
|
||||
// destination that the user specified.
|
||||
dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer preparedArchive.Close()
|
||||
|
||||
resolvedDstPath = dstDir
|
||||
content = preparedArchive
|
||||
}
|
||||
|
||||
query := make(url.Values, 2)
|
||||
query.Set("path", filepath.ToSlash(resolvedDstPath)) // Normalize the paths used in the API.
|
||||
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
|
||||
query.Set("noOverwriteDirNonDir", "true")
|
||||
|
||||
urlStr := fmt.Sprintf("/containers/%s/archive?%s", dstContainer, query.Encode())
|
||||
|
||||
response, err := cli.stream("PUT", urlStr, &streamOpts{in: content})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer response.body.Close()
|
||||
|
||||
if response.statusCode != http.StatusOK {
|
||||
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
185 vendor/github.com/docker/docker/api/client/create.go (generated, vendored)
@@ -1,185 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/graph/tags"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/docker/runconfig"
|
||||
)
|
||||
|
||||
func (cli *DockerCli) pullImage(image string) error {
|
||||
return cli.pullImageCustomOut(image, cli.out)
|
||||
}
|
||||
|
||||
func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error {
|
||||
v := url.Values{}
|
||||
repos, tag := parsers.ParseRepositoryTag(image)
|
||||
// pull only the image tagged 'latest' if no tag was specified
|
||||
if tag == "" {
|
||||
tag = tags.DefaultTag
|
||||
}
|
||||
v.Set("fromImage", repos)
|
||||
v.Set("tag", tag)
|
||||
|
||||
// Resolve the Repository name from fqn to RepositoryInfo
|
||||
repoInfo, err := registry.ParseRepositoryInfo(repos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve the Auth config relevant for this server
|
||||
authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index)
|
||||
buf, err := json.Marshal(authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
registryAuthHeader := []string{
|
||||
base64.URLEncoding.EncodeToString(buf),
|
||||
}
|
||||
sopts := &streamOpts{
|
||||
rawTerminal: true,
|
||||
out: out,
|
||||
headers: map[string][]string{"X-Registry-Auth": registryAuthHeader},
|
||||
}
|
||||
if _, err := cli.stream("POST", "/images/create?"+v.Encode(), sopts); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
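The registry credentials travel as a single request header: the auth config is marshaled to JSON and base64 URL-encoded into X-Registry-Auth. A standalone sketch with a hypothetical trimmed auth struct and dummy credentials:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// authConfig is a trimmed, hypothetical stand-in for the real registry auth type.
type authConfig struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	ServerAddress string `json:"serveraddress"`
}

func main() {
	buf, err := json.Marshal(authConfig{Username: "alice", Password: "secret", ServerAddress: "https://index.docker.io/v1/"})
	if err != nil {
		panic(err)
	}
	// This value would be sent as the X-Registry-Auth request header.
	fmt.Println(base64.URLEncoding.EncodeToString(buf))
}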
|
||||
|
||||
type cidFile struct {
|
||||
path string
|
||||
file *os.File
|
||||
written bool
|
||||
}
|
||||
|
||||
func newCIDFile(path string) (*cidFile, error) {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path)
|
||||
}
|
||||
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to create the container ID file: %s", err)
|
||||
}
|
||||
|
||||
return &cidFile{path: path, file: f}, nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (*types.ContainerCreateResponse, error) {
|
||||
containerValues := url.Values{}
|
||||
if name != "" {
|
||||
containerValues.Set("name", name)
|
||||
}
|
||||
|
||||
mergedConfig := runconfig.MergeConfigs(config, hostConfig)
|
||||
|
||||
var containerIDFile *cidFile
|
||||
if cidfile != "" {
|
||||
var err error
|
||||
if containerIDFile, err = newCIDFile(cidfile); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer containerIDFile.Close()
|
||||
}
|
||||
|
||||
repo, tag := parsers.ParseRepositoryTag(config.Image)
|
||||
if tag == "" {
|
||||
tag = tags.DefaultTag
|
||||
}
|
||||
|
||||
ref := registry.ParseReference(tag)
|
||||
var trustedRef registry.Reference
|
||||
|
||||
if isTrusted() && !ref.HasDigest() {
|
||||
var err error
|
||||
trustedRef, err = cli.trustedReference(repo, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Image = trustedRef.ImageName(repo)
|
||||
}
|
||||
|
||||
//create the container
|
||||
serverResp, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil)
|
||||
//if image not found try to pull it
|
||||
if serverResp.statusCode == 404 && strings.Contains(err.Error(), config.Image) {
|
||||
fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.ImageName(repo))
|
||||
|
||||
// we don't want to write to stdout anything apart from container.ID
|
||||
if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if trustedRef != nil && !ref.HasDigest() {
|
||||
repoInfo, err := registry.ParseRepositoryInfo(repo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.tagTrusted(repoInfo, trustedRef, ref); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Retry
|
||||
if serverResp, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var response types.ContainerCreateResponse
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, warning := range response.Warnings {
|
||||
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
|
||||
}
|
||||
if containerIDFile != nil {
|
||||
if err = containerIDFile.Write(response.ID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
// CmdCreate creates a new container from a given image.
|
||||
//
|
||||
// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...]
|
||||
func (cli *DockerCli) CmdCreate(args ...string) error {
|
||||
cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["create"].Description, true)
|
||||
addTrustedFlags(cmd, true)
|
||||
|
||||
// These are flags not stored in Config/HostConfig
|
||||
var (
|
||||
flName = cmd.String([]string{"-name"}, "", "Assign a name to the container")
|
||||
)
|
||||
|
||||
config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
|
||||
if err != nil {
|
||||
cmd.ReportError(err.Error(), true)
|
||||
os.Exit(1)
|
||||
}
|
||||
if config.Image == "" {
|
||||
cmd.Usage()
|
||||
return nil
|
||||
}
|
||||
response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(cli.out, "%s\n", response.ID)
|
||||
return nil
|
||||
}
|
56 vendor/github.com/docker/docker/api/client/diff.go (generated, vendored)
@@ -1,56 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdDiff shows changes on a container's filesystem.
|
||||
//
|
||||
// Each changed file is printed on a separate line, prefixed with a single
|
||||
// character that indicates the status of the file: C (modified), A (added),
|
||||
// or D (deleted).
|
||||
//
|
||||
// Usage: docker diff CONTAINER
|
||||
func (cli *DockerCli) CmdDiff(args ...string) error {
|
||||
cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, Cli.DockerCommands["diff"].Description, true)
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
if cmd.Arg(0) == "" {
|
||||
return fmt.Errorf("Container name cannot be empty")
|
||||
}
|
||||
|
||||
serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
changes := []types.ContainerChange{}
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&changes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, change := range changes {
|
||||
var kind string
|
||||
switch change.Kind {
|
||||
case archive.ChangeModify:
|
||||
kind = "C"
|
||||
case archive.ChangeAdd:
|
||||
kind = "A"
|
||||
case archive.ChangeDelete:
|
||||
kind = "D"
|
||||
}
|
||||
fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
63 vendor/github.com/docker/docker/api/client/events.go (generated, vendored)
@@ -1,63 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/opts"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers/filters"
|
||||
"github.com/docker/docker/pkg/timeutils"
|
||||
)
|
||||
|
||||
// CmdEvents prints a live stream of real time events from the server.
|
||||
//
|
||||
// Usage: docker events [OPTIONS]
|
||||
func (cli *DockerCli) CmdEvents(args ...string) error {
|
||||
cmd := Cli.Subcmd("events", nil, Cli.DockerCommands["events"].Description, true)
|
||||
since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp")
|
||||
until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp")
|
||||
flFilter := opts.NewListOpts(nil)
|
||||
cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
|
||||
cmd.Require(flag.Exact, 0)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
v = url.Values{}
|
||||
eventFilterArgs = filters.Args{}
|
||||
)
|
||||
|
||||
// Consolidate all filter flags, and sanity check them early.
|
||||
// They'll get processed in the daemon/server.
|
||||
for _, f := range flFilter.GetAll() {
|
||||
var err error
|
||||
eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ref := time.Now()
|
||||
if *since != "" {
|
||||
v.Set("since", timeutils.GetTimestamp(*since, ref))
|
||||
}
|
||||
if *until != "" {
|
||||
v.Set("until", timeutils.GetTimestamp(*until, ref))
|
||||
}
|
||||
if len(eventFilterArgs) > 0 {
|
||||
filterJSON, err := filters.ToParam(eventFilterArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set("filters", filterJSON)
|
||||
}
|
||||
sopts := &streamOpts{
|
||||
rawTerminal: true,
|
||||
out: cli.out,
|
||||
}
|
||||
if _, err := cli.stream("GET", "/events?"+v.Encode(), sopts); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
134 vendor/github.com/docker/docker/api/client/exec.go (generated, vendored)
@@ -1,134 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/pkg/promise"
|
||||
"github.com/docker/docker/runconfig"
|
||||
)
|
||||
|
||||
// CmdExec runs a command in a running container.
|
||||
//
|
||||
// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...]
|
||||
func (cli *DockerCli) CmdExec(args ...string) error {
|
||||
cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true)
|
||||
|
||||
execConfig, err := runconfig.ParseExec(cmd, args)
|
||||
// just in case the ParseExec does not exit
|
||||
if execConfig.Container == "" || err != nil {
|
||||
return Cli.StatusError{StatusCode: 1}
|
||||
}
|
||||
|
||||
serverResp, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var response types.ContainerExecCreateResponse
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
execID := response.ID
|
||||
|
||||
if execID == "" {
|
||||
fmt.Fprintf(cli.out, "exec ID empty")
|
||||
return nil
|
||||
}
|
||||
|
||||
//Temp struct for execStart so that we don't need to transfer all the execConfig
|
||||
execStartCheck := &types.ExecStartCheck{
|
||||
Detach: execConfig.Detach,
|
||||
Tty: execConfig.Tty,
|
||||
}
|
||||
|
||||
if !execConfig.Detach {
|
||||
if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execStartCheck, nil)); err != nil {
|
||||
return err
|
||||
}
|
||||
// For now don't print this - wait for when we support exec wait()
|
||||
// fmt.Fprintf(cli.out, "%s\n", execID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Interactive exec requested.
|
||||
var (
|
||||
out, stderr io.Writer
|
||||
in io.ReadCloser
|
||||
hijacked = make(chan io.Closer)
|
||||
errCh chan error
|
||||
)
|
||||
|
||||
// Block the return until the chan gets closed
|
||||
defer func() {
|
||||
logrus.Debugf("End of CmdExec(), Waiting for hijack to finish.")
|
||||
if _, ok := <-hijacked; ok {
|
||||
fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
|
||||
}
|
||||
}()
|
||||
|
||||
if execConfig.AttachStdin {
|
||||
in = cli.in
|
||||
}
|
||||
if execConfig.AttachStdout {
|
||||
out = cli.out
|
||||
}
|
||||
if execConfig.AttachStderr {
|
||||
if execConfig.Tty {
|
||||
stderr = cli.out
|
||||
} else {
|
||||
stderr = cli.err
|
||||
}
|
||||
}
|
||||
errCh = promise.Go(func() error {
|
||||
return cli.hijackWithContentType("POST", "/exec/"+execID+"/start", "application/json", execConfig.Tty, in, out, stderr, hijacked, execConfig)
|
||||
})
|
||||
|
||||
// Acknowledge the hijack before starting
|
||||
select {
|
||||
case closer := <-hijacked:
|
||||
// Make sure that hijack gets closed when returning. (result
|
||||
// in closing hijack chan and freeing server's goroutines.
|
||||
if closer != nil {
|
||||
defer closer.Close()
|
||||
}
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
logrus.Debugf("Error hijack: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if execConfig.Tty && cli.isTerminalIn {
|
||||
if err := cli.monitorTtySize(execID, true); err != nil {
|
||||
fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := <-errCh; err != nil {
|
||||
logrus.Debugf("Error hijack: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
var status int
|
||||
if _, status, err = getExecExitCode(cli, execID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if status != 0 {
|
||||
return Cli.StatusError{StatusCode: status}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
46 vendor/github.com/docker/docker/api/client/export.go (generated, vendored)
@@ -1,46 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdExport exports a filesystem as a tar archive.
|
||||
//
|
||||
// The tar archive is streamed to STDOUT by default or written to a file.
|
||||
//
|
||||
// Usage: docker export [OPTIONS] CONTAINER
|
||||
func (cli *DockerCli) CmdExport(args ...string) error {
|
||||
cmd := Cli.Subcmd("export", []string{"CONTAINER"}, Cli.DockerCommands["export"].Description, true)
|
||||
outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
output = cli.out
|
||||
err error
|
||||
)
|
||||
if *outfile != "" {
|
||||
output, err = os.Create(*outfile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if cli.isTerminalOut {
|
||||
return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
|
||||
}
|
||||
|
||||
image := cmd.Arg(0)
|
||||
sopts := &streamOpts{
|
||||
rawTerminal: true,
|
||||
out: output,
|
||||
}
|
||||
if _, err := cli.stream("GET", "/containers/"+image+"/export", sopts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
258 vendor/github.com/docker/docker/api/client/hijack.go (generated, vendored)
@@ -1,258 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
)
|
||||
|
||||
type tlsClientCon struct {
|
||||
*tls.Conn
|
||||
rawConn net.Conn
|
||||
}
|
||||
|
||||
func (c *tlsClientCon) CloseWrite() error {
|
||||
// Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
|
||||
// on its underlying connection.
|
||||
if cwc, ok := c.rawConn.(interface {
|
||||
CloseWrite() error
|
||||
}); ok {
|
||||
return cwc.CloseWrite()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
|
||||
return tlsDialWithDialer(new(net.Dialer), network, addr, config)
|
||||
}
|
||||
|
||||
// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
|
||||
// order to return our custom tlsClientCon struct which holds both the tls.Conn
|
||||
// object _and_ its underlying raw connection. The rationale for this is that
|
||||
// we need to be able to close the write end of the connection when attaching,
|
||||
// which tls.Conn does not provide.
|
||||
func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
|
||||
// We want the Timeout and Deadline values from dialer to cover the
|
||||
// whole process: TCP connection and TLS handshake. This means that we
|
||||
// also need to start our own timers now.
|
||||
timeout := dialer.Timeout
|
||||
|
||||
if !dialer.Deadline.IsZero() {
|
||||
deadlineTimeout := dialer.Deadline.Sub(time.Now())
|
||||
if timeout == 0 || deadlineTimeout < timeout {
|
||||
timeout = deadlineTimeout
|
||||
}
|
||||
}
|
||||
|
||||
var errChannel chan error
|
||||
|
||||
if timeout != 0 {
|
||||
errChannel = make(chan error, 2)
|
||||
time.AfterFunc(timeout, func() {
|
||||
errChannel <- errors.New("")
|
||||
})
|
||||
}
|
||||
|
||||
rawConn, err := dialer.Dial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// When we set up a TCP connection for hijack, there could be long periods
|
||||
// of inactivity (a long running command with no output) that in certain
|
||||
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
|
||||
// state. Setting TCP KeepAlive on the socket connection will prohibit
|
||||
// ECONNTIMEOUT unless the socket connection truly is broken
|
||||
if tcpConn, ok := rawConn.(*net.TCPConn); ok {
|
||||
tcpConn.SetKeepAlive(true)
|
||||
tcpConn.SetKeepAlivePeriod(30 * time.Second)
|
||||
}
|
||||
|
||||
colonPos := strings.LastIndex(addr, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(addr)
|
||||
}
|
||||
hostname := addr[:colonPos]
|
||||
|
||||
// If no ServerName is set, infer the ServerName
|
||||
// from the hostname we're connecting to.
|
||||
if config.ServerName == "" {
|
||||
// Make a copy to avoid polluting argument or default.
|
||||
c := *config
|
||||
c.ServerName = hostname
|
||||
config = &c
|
||||
}
|
||||
|
||||
conn := tls.Client(rawConn, config)
|
||||
|
||||
if timeout == 0 {
|
||||
err = conn.Handshake()
|
||||
} else {
|
||||
go func() {
|
||||
errChannel <- conn.Handshake()
|
||||
}()
|
||||
|
||||
err = <-errChannel
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
rawConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This is where Docker differs from the standard crypto/tls package: we return a
|
||||
// wrapper which holds both the TLS and raw connections.
|
||||
return &tlsClientCon{conn, rawConn}, nil
|
||||
}
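The wrapper exists because tls.Conn does not expose CloseWrite, yet the hijack path needs to half-close the connection to signal end of stdin while still reading the response. A standalone sketch of the same CloseWrite type assertion over a plain TCP connection (a local loopback listener stands in for the daemon):

package main

import (
	"fmt"
	"io/ioutil"
	"net"
)

// halfClose closes only the write side when the connection supports it,
// mirroring the CloseWrite type assertion used by the hijack code above.
func halfClose(c net.Conn) error {
	if cwc, ok := c.(interface {
		CloseWrite() error
	}); ok {
		return cwc.CloseWrite()
	}
	return fmt.Errorf("%T cannot half-close", c)
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	done := make(chan struct{})
	go func() {
		defer close(done)
		conn, _ := ln.Accept()
		defer conn.Close()
		data, _ := ioutil.ReadAll(conn) // returns once the client half-closes
		fmt.Printf("server read %d bytes\n", len(data))
	}()

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	conn.Write([]byte("hello"))
	if err := halfClose(conn); err != nil { // send EOF, keep the read side usable
		panic(err)
	}
	<-done
	conn.Close()
}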
|
||||
|
||||
func (cli *DockerCli) dial() (net.Conn, error) {
|
||||
if cli.tlsConfig != nil && cli.proto != "unix" {
|
||||
// Notice this isn't Go standard's tls.Dial function
|
||||
return tlsDial(cli.proto, cli.addr, cli.tlsConfig)
|
||||
}
|
||||
return net.Dial(cli.proto, cli.addr)
|
||||
}
|
||||
|
||||
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error {
|
||||
return cli.hijackWithContentType(method, path, "text/plain", setRawTerminal, in, stdout, stderr, started, data)
|
||||
}
|
||||
|
||||
func (cli *DockerCli) hijackWithContentType(method, path, contentType string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error {
|
||||
defer func() {
|
||||
if started != nil {
|
||||
close(started)
|
||||
}
|
||||
}()
|
||||
|
||||
params, err := cli.encodeData(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest(method, fmt.Sprintf("%s/v%s%s", cli.basePath, api.Version, path), params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
|
||||
// then the user can't change OUR headers
|
||||
for k, v := range cli.configFile.HTTPHeaders {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
|
||||
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")")
|
||||
req.Header.Set("Content-Type", contentType)
|
||||
req.Header.Set("Connection", "Upgrade")
|
||||
req.Header.Set("Upgrade", "tcp")
|
||||
req.Host = cli.addr
|
||||
|
||||
dial, err := cli.dial()
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// When we set up a TCP connection for hijack, there could be long periods
|
||||
// of inactivity (a long running command with no output) that in certain
|
||||
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
|
||||
// state. Setting TCP KeepAlive on the socket connection will prohibit
|
||||
// ECONNTIMEOUT unless the socket connection truly is broken
|
||||
if tcpConn, ok := dial.(*net.TCPConn); ok {
|
||||
tcpConn.SetKeepAlive(true)
|
||||
tcpConn.SetKeepAlivePeriod(30 * time.Second)
|
||||
}
|
||||
|
||||
clientconn := httputil.NewClientConn(dial, nil)
|
||||
defer clientconn.Close()
|
||||
|
||||
// Server hijacks the connection, error 'connection closed' expected
|
||||
clientconn.Do(req)
|
||||
|
||||
rwc, br := clientconn.Hijack()
|
||||
defer rwc.Close()
|
||||
|
||||
if started != nil {
|
||||
started <- rwc
|
||||
}
|
||||
|
||||
var oldState *term.State
|
||||
|
||||
if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" {
|
||||
oldState, err = term.SetRawTerminal(cli.inFd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer term.RestoreTerminal(cli.inFd, oldState)
|
||||
}
|
||||
|
||||
receiveStdout := make(chan error, 1)
|
||||
if stdout != nil || stderr != nil {
|
||||
go func() {
|
||||
defer func() {
|
||||
if in != nil {
|
||||
if setRawTerminal && cli.isTerminalIn {
|
||||
term.RestoreTerminal(cli.inFd, oldState)
|
||||
}
|
||||
in.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// When TTY is ON, use regular copy
|
||||
if setRawTerminal && stdout != nil {
|
||||
_, err = io.Copy(stdout, br)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(stdout, stderr, br)
|
||||
}
|
||||
logrus.Debugf("[hijack] End of stdout")
|
||||
receiveStdout <- err
|
||||
}()
|
||||
}
|
||||
|
||||
stdinDone := make(chan struct{})
|
||||
go func() {
|
||||
if in != nil {
|
||||
io.Copy(rwc, in)
|
||||
logrus.Debugf("[hijack] End of stdin")
|
||||
}
|
||||
|
||||
if conn, ok := rwc.(interface {
|
||||
CloseWrite() error
|
||||
}); ok {
|
||||
if err := conn.CloseWrite(); err != nil {
|
||||
logrus.Debugf("Couldn't send EOF: %s", err)
|
||||
}
|
||||
}
|
||||
close(stdinDone)
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-receiveStdout:
|
||||
if err != nil {
|
||||
logrus.Debugf("Error receiveStdout: %s", err)
|
||||
return err
|
||||
}
|
||||
case <-stdinDone:
|
||||
if stdout != nil || stderr != nil {
|
||||
if err := <-receiveStdout; err != nil {
|
||||
logrus.Debugf("Error receiveStdout: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
83 vendor/github.com/docker/docker/api/client/history.go (generated, vendored)
@@ -1,83 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/stringutils"
|
||||
"github.com/docker/docker/pkg/units"
|
||||
)
|
||||
|
||||
// CmdHistory shows the history of an image.
|
||||
//
|
||||
// Usage: docker history [OPTIONS] IMAGE
|
||||
func (cli *DockerCli) CmdHistory(args ...string) error {
|
||||
cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true)
|
||||
human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format")
|
||||
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
|
||||
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
serverResp, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
history := []types.ImageHistory{}
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&history); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
|
||||
|
||||
if *quiet {
|
||||
for _, entry := range history {
|
||||
if *noTrunc {
|
||||
fmt.Fprintf(w, "%s\n", entry.ID)
|
||||
} else {
|
||||
fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID))
|
||||
}
|
||||
}
|
||||
w.Flush()
|
||||
return nil
|
||||
}
|
||||
|
||||
var imageID string
|
||||
var createdBy string
|
||||
var created string
|
||||
var size string
|
||||
|
||||
fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
|
||||
for _, entry := range history {
|
||||
imageID = entry.ID
|
||||
createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1)
|
||||
if *noTrunc == false {
|
||||
createdBy = stringutils.Truncate(createdBy, 45)
|
||||
imageID = stringid.TruncateID(entry.ID)
|
||||
}
|
||||
|
||||
if *human {
|
||||
created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago"
|
||||
size = units.HumanSize(float64(entry.Size))
|
||||
} else {
|
||||
created = time.Unix(entry.Created, 0).Format(time.RFC3339)
|
||||
size = strconv.FormatInt(entry.Size, 10)
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment)
|
||||
}
|
||||
w.Flush()
|
||||
return nil
|
||||
}
|
130 vendor/github.com/docker/docker/api/client/images.go (generated, vendored)
@@ -1,130 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/opts"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/pkg/parsers/filters"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/units"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified.
|
||||
//
|
||||
// Usage: docker images [OPTIONS] [REPOSITORY]
|
||||
func (cli *DockerCli) CmdImages(args ...string) error {
|
||||
cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true)
|
||||
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
|
||||
all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
|
||||
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
|
||||
showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")
|
||||
|
||||
flFilter := opts.NewListOpts(nil)
|
||||
cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
|
||||
cmd.Require(flag.Max, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
// Consolidate all filter flags, and sanity check them early.
|
||||
// They'll get processed in the daemon/server.
|
||||
imageFilterArgs := filters.Args{}
|
||||
for _, f := range flFilter.GetAll() {
|
||||
var err error
|
||||
imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
matchName := cmd.Arg(0)
|
||||
v := url.Values{}
|
||||
if len(imageFilterArgs) > 0 {
|
||||
filterJSON, err := filters.ToParam(imageFilterArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set("filters", filterJSON)
|
||||
}
|
||||
|
||||
if cmd.NArg() == 1 {
|
||||
// FIXME rename this parameter, to not be confused with the filters flag
|
||||
v.Set("filter", matchName)
|
||||
}
|
||||
if *all {
|
||||
v.Set("all", "1")
|
||||
}
|
||||
|
||||
serverResp, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
images := []types.Image{}
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&images); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
|
||||
if !*quiet {
|
||||
if *showDigests {
|
||||
fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
|
||||
} else {
|
||||
fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
|
||||
}
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
ID := image.ID
|
||||
if !*noTrunc {
|
||||
ID = stringid.TruncateID(ID)
|
||||
}
|
||||
|
||||
repoTags := image.RepoTags
|
||||
repoDigests := image.RepoDigests
|
||||
|
||||
if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
|
||||
// dangling image - clear out either repoTags or repoDigests so we only show it once below
|
||||
repoDigests = []string{}
|
||||
}
|
||||
|
||||
// combine the tags and digests lists
|
||||
tagsAndDigests := append(repoTags, repoDigests...)
|
||||
for _, repoAndRef := range tagsAndDigests {
|
||||
repo, ref := parsers.ParseRepositoryTag(repoAndRef)
|
||||
// default tag and digest to none - if there's a value, it'll be set below
|
||||
tag := "<none>"
|
||||
digest := "<none>"
|
||||
if utils.DigestReference(ref) {
|
||||
digest = ref
|
||||
} else {
|
||||
tag = ref
|
||||
}
|
||||
|
||||
if !*quiet {
|
||||
if *showDigests {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
|
||||
} else {
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintln(w, ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !*quiet {
|
||||
w.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
79 vendor/github.com/docker/docker/api/client/import.go (generated, vendored)
@@ -1,79 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/opts"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
"github.com/docker/docker/registry"
|
||||
)
|
||||
|
||||
// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image.
|
||||
//
|
||||
// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to local file relative to docker client. If the URL is '-', then the tar file is read from STDIN.
|
||||
//
|
||||
// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]
|
||||
func (cli *DockerCli) CmdImport(args ...string) error {
|
||||
cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true)
|
||||
flChanges := opts.NewListOpts(nil)
|
||||
cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
|
||||
message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
v = url.Values{}
|
||||
src = cmd.Arg(0)
|
||||
repository = cmd.Arg(1)
|
||||
)
|
||||
|
||||
v.Set("fromSrc", src)
|
||||
v.Set("repo", repository)
|
||||
v.Set("message", *message)
|
||||
for _, change := range flChanges.GetAll() {
|
||||
v.Add("changes", change)
|
||||
}
|
||||
if cmd.NArg() == 3 {
|
||||
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. Please use file|URL|- [REPOSITORY[:TAG]]\n")
|
||||
v.Set("tag", cmd.Arg(2))
|
||||
}
|
||||
|
||||
if repository != "" {
|
||||
// Check if the given image name can be resolved
|
||||
repo, _ := parsers.ParseRepositoryTag(repository)
|
||||
if err := registry.ValidateRepositoryName(repo); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var in io.Reader
|
||||
|
||||
if src == "-" {
|
||||
in = cli.in
|
||||
} else if !urlutil.IsURL(src) {
|
||||
v.Set("fromSrc", "-")
|
||||
file, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
in = file
|
||||
|
||||
}
|
||||
|
||||
sopts := &streamOpts{
|
||||
rawTerminal: true,
|
||||
in: in,
|
||||
out: cli.out,
|
||||
}
|
||||
|
||||
_, err := cli.stream("POST", "/images/create?"+v.Encode(), sopts)
|
||||
return err
|
||||
}
|
114 vendor/github.com/docker/docker/api/client/info.go generated vendored
@@ -1,114 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/pkg/httputils"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/units"
|
||||
)
|
||||
|
||||
// CmdInfo displays system-wide information.
|
||||
//
|
||||
// Usage: docker info
|
||||
func (cli *DockerCli) CmdInfo(args ...string) error {
|
||||
cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true)
|
||||
cmd.Require(flag.Exact, 0)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
serverResp, err := cli.call("GET", "/info", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
info := &types.Info{}
|
||||
if err := json.NewDecoder(serverResp.body).Decode(info); err != nil {
|
||||
return fmt.Errorf("Error reading remote info: %v", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers)
|
||||
fmt.Fprintf(cli.out, "Images: %d\n", info.Images)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver)
|
||||
if info.DriverStatus != nil {
|
||||
for _, pair := range info.DriverStatus {
|
||||
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
|
||||
}
|
||||
}
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem)
|
||||
fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU)
|
||||
fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal)))
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID)
|
||||
|
||||
if info.Debug {
|
||||
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug)
|
||||
fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd)
|
||||
fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines)
|
||||
fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime)
|
||||
fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener)
|
||||
fmt.Fprintf(cli.out, " Init SHA1: %s\n", info.InitSha1)
|
||||
fmt.Fprintf(cli.out, " Init Path: %s\n", info.InitPath)
|
||||
fmt.Fprintf(cli.out, " Docker Root Dir: %s\n", info.DockerRootDir)
|
||||
}
|
||||
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HTTPProxy)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy)
|
||||
ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy)
|
||||
|
||||
if info.IndexServerAddress != "" {
|
||||
u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username
|
||||
if len(u) > 0 {
|
||||
fmt.Fprintf(cli.out, "Username: %v\n", u)
|
||||
fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress)
|
||||
}
|
||||
}
|
||||
|
||||
// Only output these warnings if the server does not support these features
|
||||
if h, err := httputils.ParseServerHeader(serverResp.header.Get("Server")); err == nil {
|
||||
if h.OS != "windows" {
|
||||
if !info.MemoryLimit {
|
||||
fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
|
||||
}
|
||||
if !info.SwapLimit {
|
||||
fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
|
||||
}
|
||||
if !info.IPv4Forwarding {
|
||||
fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled\n")
|
||||
}
|
||||
if !info.BridgeNfIptables {
|
||||
fmt.Fprintf(cli.err, "WARNING: bridge-nf-call-iptables is disabled\n")
|
||||
}
|
||||
if !info.BridgeNfIP6tables {
|
||||
fmt.Fprintf(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if info.Labels != nil {
|
||||
fmt.Fprintln(cli.out, "Labels:")
|
||||
for _, attribute := range info.Labels {
|
||||
fmt.Fprintf(cli.out, " %s\n", attribute)
|
||||
}
|
||||
}
|
||||
|
||||
ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild)
|
||||
if info.ClusterStore != "" {
|
||||
fmt.Fprintf(cli.out, "Cluster store: %s\n", info.ClusterStore)
|
||||
}
|
||||
|
||||
if info.ClusterAdvertise != "" {
|
||||
fmt.Fprintf(cli.out, "Cluster advertise: %s\n", info.ClusterAdvertise)
|
||||
}
|
||||
return nil
|
||||
}
|
203 vendor/github.com/docker/docker/api/client/inspect.go generated vendored
@@ -1,203 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
var funcMap = template.FuncMap{
|
||||
"json": func(v interface{}) string {
|
||||
a, _ := json.Marshal(v)
|
||||
return string(a)
|
||||
},
|
||||
}
|
||||
|
||||
// CmdInspect displays low-level information on one or more containers or images.
|
||||
//
|
||||
// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...]
|
||||
func (cli *DockerCli) CmdInspect(args ...string) error {
|
||||
cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true)
|
||||
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template")
|
||||
inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)")
|
||||
size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var tmpl *template.Template
|
||||
var err error
|
||||
var obj []byte
|
||||
|
||||
if *tmplStr != "" {
|
||||
if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil {
|
||||
return Cli.StatusError{StatusCode: 64,
|
||||
Status: "Template parsing error: " + err.Error()}
|
||||
}
|
||||
}
|
||||
|
||||
if *inspectType != "" && *inspectType != "container" && *inspectType != "image" {
|
||||
return fmt.Errorf("%q is not a valid value for --type", *inspectType)
|
||||
}
|
||||
|
||||
indented := new(bytes.Buffer)
|
||||
indented.WriteString("[\n")
|
||||
status := 0
|
||||
isImage := false
|
||||
|
||||
v := url.Values{}
|
||||
if *size {
|
||||
v.Set("size", "1")
|
||||
}
|
||||
|
||||
for _, name := range cmd.Args() {
|
||||
if *inspectType == "" || *inspectType == "container" {
|
||||
obj, _, err = readBody(cli.call("GET", "/containers/"+name+"/json?"+v.Encode(), nil, nil))
|
||||
if err != nil {
|
||||
if err == errConnectionFailed {
|
||||
return err
|
||||
}
|
||||
if *inspectType == "container" {
|
||||
if strings.Contains(err.Error(), "No such") {
|
||||
fmt.Fprintf(cli.err, "Error: No such container: %s\n", name)
|
||||
} else {
|
||||
fmt.Fprintf(cli.err, "%s", err)
|
||||
}
|
||||
status = 1
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if obj == nil && (*inspectType == "" || *inspectType == "image") {
|
||||
obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, nil))
|
||||
isImage = true
|
||||
if err != nil {
|
||||
if err == errConnectionFailed {
|
||||
return err
|
||||
}
|
||||
if strings.Contains(err.Error(), "No such") {
|
||||
if *inspectType == "" {
|
||||
fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
|
||||
} else {
|
||||
fmt.Fprintf(cli.err, "Error: No such image: %s\n", name)
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintf(cli.err, "%s", err)
|
||||
}
|
||||
status = 1
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if tmpl == nil {
|
||||
if err := json.Indent(indented, obj, "", " "); err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
status = 1
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
rdr := bytes.NewReader(obj)
|
||||
dec := json.NewDecoder(rdr)
|
||||
buf := bytes.NewBufferString("")
|
||||
|
||||
if isImage {
|
||||
inspPtr := types.ImageInspect{}
|
||||
if err := dec.Decode(&inspPtr); err != nil {
|
||||
fmt.Fprintf(cli.err, "Unable to read inspect data: %v\n", err)
|
||||
status = 1
|
||||
break
|
||||
}
|
||||
if err := tmpl.Execute(buf, inspPtr); err != nil {
|
||||
rdr.Seek(0, 0)
|
||||
var ok bool
|
||||
|
||||
if buf, ok = cli.decodeRawInspect(tmpl, dec); !ok {
|
||||
fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
|
||||
status = 1
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
inspPtr := types.ContainerJSON{}
|
||||
if err := dec.Decode(&inspPtr); err != nil {
|
||||
fmt.Fprintf(cli.err, "Unable to read inspect data: %v\n", err)
|
||||
status = 1
|
||||
break
|
||||
}
|
||||
if err := tmpl.Execute(buf, inspPtr); err != nil {
|
||||
rdr.Seek(0, 0)
|
||||
var ok bool
|
||||
|
||||
if buf, ok = cli.decodeRawInspect(tmpl, dec); !ok {
|
||||
fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
|
||||
status = 1
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cli.out.Write(buf.Bytes())
|
||||
cli.out.Write([]byte{'\n'})
|
||||
}
|
||||
indented.WriteString(",")
|
||||
}
|
||||
|
||||
if indented.Len() > 1 {
|
||||
// Remove trailing ','
|
||||
indented.Truncate(indented.Len() - 1)
|
||||
}
|
||||
indented.WriteString("]\n")
|
||||
|
||||
if tmpl == nil {
|
||||
// Note that we will always write "[]" when "-f" isn't specified,
|
||||
// to make sure the output is always an array, see
|
||||
// https://github.com/docker/docker/pull/9500#issuecomment-65846734
|
||||
if _, err := io.Copy(cli.out, indented); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if status != 0 {
|
||||
return Cli.StatusError{StatusCode: status}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// decodeRawInspect executes the inspect template with a raw interface.
|
||||
// This allows docker cli to parse inspect structs injected with Swarm fields.
|
||||
// Unfortunately, go 1.4 doesn't fail executing invalid templates when the input is an interface.
|
||||
// It doesn't allow this behavior to be modified either, and sends <no value> messages to the output.
|
||||
// We assume that the template is invalid when there is a <no value>; if the template were valid
|
||||
// we'd get <nil> or "" values. In that case we fail with the original error raised executing the
|
||||
// template with the typed input.
|
||||
//
|
||||
// TODO: Go 1.5 allows customizing the error behavior; we can probably get rid of this as soon as
|
||||
// we build Docker with that version:
|
||||
// https://golang.org/pkg/text/template/#Template.Option
|
||||
func (cli *DockerCli) decodeRawInspect(tmpl *template.Template, dec *json.Decoder) (*bytes.Buffer, bool) {
|
||||
var raw interface{}
|
||||
buf := bytes.NewBufferString("")
|
||||
|
||||
if rawErr := dec.Decode(&raw); rawErr != nil {
|
||||
fmt.Fprintf(cli.err, "Unable to read inspect data: %v\n", rawErr)
|
||||
return buf, false
|
||||
}
|
||||
|
||||
if rawErr := tmpl.Execute(buf, raw); rawErr != nil {
|
||||
return buf, false
|
||||
}
|
||||
|
||||
if strings.Contains(buf.String(), "<no value>") {
|
||||
return buf, false
|
||||
}
|
||||
return buf, true
|
||||
}
|
33 vendor/github.com/docker/docker/api/client/kill.go generated vendored
@@ -1,33 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdKill kills one or more running container using SIGKILL or a specified signal.
|
||||
//
|
||||
// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
|
||||
func (cli *DockerCli) CmdKill(args ...string) error {
|
||||
cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["kill"].Description, true)
|
||||
signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, nil)); err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
errNames = append(errNames, name)
|
||||
} else {
|
||||
fmt.Fprintf(cli.out, "%s\n", name)
|
||||
}
|
||||
}
|
||||
if len(errNames) > 0 {
|
||||
return fmt.Errorf("Error: failed to kill containers: %v", errNames)
|
||||
}
|
||||
return nil
|
||||
}
|
42 vendor/github.com/docker/docker/api/client/load.go generated vendored
@@ -1,42 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdLoad loads an image from a tar archive.
|
||||
//
|
||||
// The tar archive is read from STDIN by default, or from a tar archive file.
|
||||
//
|
||||
// Usage: docker load [OPTIONS]
|
||||
func (cli *DockerCli) CmdLoad(args ...string) error {
|
||||
cmd := Cli.Subcmd("load", nil, Cli.DockerCommands["load"].Description, true)
|
||||
infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
|
||||
cmd.Require(flag.Exact, 0)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
input io.Reader = cli.in
|
||||
err error
|
||||
)
|
||||
if *infile != "" {
|
||||
input, err = os.Open(*infile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
sopts := &streamOpts{
|
||||
rawTerminal: true,
|
||||
in: input,
|
||||
out: cli.out,
|
||||
}
|
||||
if _, err := cli.stream("POST", "/images/load", sopts); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
153 vendor/github.com/docker/docker/api/client/login.go generated vendored
@@ -1,153 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/cliconfig"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/docker/docker/registry"
|
||||
)
|
||||
|
||||
// CmdLogin logs in or registers a user to a Docker registry service.
|
||||
//
|
||||
// If no server is specified, the user will be logged into or registered to the registry's index server.
|
||||
//
|
||||
// Usage: docker login SERVER
|
||||
func (cli *DockerCli) CmdLogin(args ...string) error {
|
||||
cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true)
|
||||
cmd.Require(flag.Max, 1)
|
||||
|
||||
var username, password, email string
|
||||
|
||||
cmd.StringVar(&username, []string{"u", "-username"}, "", "Username")
|
||||
cmd.StringVar(&password, []string{"p", "-password"}, "", "Password")
|
||||
cmd.StringVar(&email, []string{"e", "-email"}, "", "Email")
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
// On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210
|
||||
if runtime.GOOS == "windows" {
|
||||
cli.in = os.Stdin
|
||||
}
|
||||
|
||||
serverAddress := registry.IndexServer
|
||||
if len(cmd.Args()) > 0 {
|
||||
serverAddress = cmd.Arg(0)
|
||||
}
|
||||
|
||||
promptDefault := func(prompt string, configDefault string) {
|
||||
if configDefault == "" {
|
||||
fmt.Fprintf(cli.out, "%s: ", prompt)
|
||||
} else {
|
||||
fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
|
||||
}
|
||||
}
|
||||
|
||||
readInput := func(in io.Reader, out io.Writer) string {
|
||||
reader := bufio.NewReader(in)
|
||||
line, _, err := reader.ReadLine()
|
||||
if err != nil {
|
||||
fmt.Fprintln(out, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
return string(line)
|
||||
}
|
||||
|
||||
authconfig, ok := cli.configFile.AuthConfigs[serverAddress]
|
||||
if !ok {
|
||||
authconfig = cliconfig.AuthConfig{}
|
||||
}
|
||||
|
||||
if username == "" {
|
||||
promptDefault("Username", authconfig.Username)
|
||||
username = readInput(cli.in, cli.out)
|
||||
username = strings.TrimSpace(username)
|
||||
if username == "" {
|
||||
username = authconfig.Username
|
||||
}
|
||||
}
|
||||
// Assume that a different username means they may not want to use
|
||||
// the password or email from the config file, so prompt them
|
||||
if username != authconfig.Username {
|
||||
if password == "" {
|
||||
oldState, err := term.SaveState(cli.inFd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(cli.out, "Password: ")
|
||||
term.DisableEcho(cli.inFd, oldState)
|
||||
|
||||
password = readInput(cli.in, cli.out)
|
||||
fmt.Fprint(cli.out, "\n")
|
||||
|
||||
term.RestoreTerminal(cli.inFd, oldState)
|
||||
if password == "" {
|
||||
return fmt.Errorf("Error : Password Required")
|
||||
}
|
||||
}
|
||||
|
||||
if email == "" {
|
||||
promptDefault("Email", authconfig.Email)
|
||||
email = readInput(cli.in, cli.out)
|
||||
if email == "" {
|
||||
email = authconfig.Email
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// However, if they don't override the username, use the
|
||||
// password or email from the cmd line if specified. IOW, allow
|
||||
// them to change/override them. And if not specified, just
|
||||
// use what's in the config file
|
||||
if password == "" {
|
||||
password = authconfig.Password
|
||||
}
|
||||
if email == "" {
|
||||
email = authconfig.Email
|
||||
}
|
||||
}
|
||||
authconfig.Username = username
|
||||
authconfig.Password = password
|
||||
authconfig.Email = email
|
||||
authconfig.ServerAddress = serverAddress
|
||||
cli.configFile.AuthConfigs[serverAddress] = authconfig
|
||||
|
||||
serverResp, err := cli.call("POST", "/auth", cli.configFile.AuthConfigs[serverAddress], nil)
|
||||
if serverResp.statusCode == 401 {
|
||||
delete(cli.configFile.AuthConfigs, serverAddress)
|
||||
if err2 := cli.configFile.Save(); err2 != nil {
|
||||
fmt.Fprintf(cli.out, "WARNING: could not save config file: %v\n", err2)
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var response types.AuthResponse
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&response); err != nil {
|
||||
// Upon error, remove entry
|
||||
delete(cli.configFile.AuthConfigs, serverAddress)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cli.configFile.Save(); err != nil {
|
||||
return fmt.Errorf("Error saving config file: %v", err)
|
||||
}
|
||||
fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s\n", cli.configFile.Filename())
|
||||
|
||||
if response.Status != "" {
|
||||
fmt.Fprintf(cli.out, "%s\n", response.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
39 vendor/github.com/docker/docker/api/client/logout.go generated vendored
@@ -1,39 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/registry"
|
||||
)
|
||||
|
||||
// CmdLogout logs a user out from a Docker registry.
|
||||
//
|
||||
// If no server is specified, the user will be logged out from the registry's index server.
|
||||
//
|
||||
// Usage: docker logout [SERVER]
|
||||
func (cli *DockerCli) CmdLogout(args ...string) error {
|
||||
cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified \""+registry.IndexServer+"\" is the default.", true)
|
||||
cmd.Require(flag.Max, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
serverAddress := registry.IndexServer
|
||||
if len(cmd.Args()) > 0 {
|
||||
serverAddress = cmd.Arg(0)
|
||||
}
|
||||
|
||||
if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok {
|
||||
fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress)
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress)
|
||||
delete(cli.configFile.AuthConfigs, serverAddress)
|
||||
if err := cli.configFile.Save(); err != nil {
|
||||
return fmt.Errorf("Failed to save docker config: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
74 vendor/github.com/docker/docker/api/client/logs.go generated vendored
@@ -1,74 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/timeutils"
|
||||
)
|
||||
|
||||
var validDrivers = map[string]bool{
|
||||
"json-file": true,
|
||||
"journald": true,
|
||||
}
|
||||
|
||||
// CmdLogs fetches the logs of a given container.
|
||||
//
|
||||
// docker logs [OPTIONS] CONTAINER
|
||||
func (cli *DockerCli) CmdLogs(args ...string) error {
|
||||
cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, Cli.DockerCommands["logs"].Description, true)
|
||||
follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
|
||||
since := cmd.String([]string{"-since"}, "", "Show logs since timestamp")
|
||||
times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
|
||||
tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
name := cmd.Arg(0)
|
||||
|
||||
serverResp, err := cli.call("GET", "/containers/"+name+"/json", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var c types.ContainerJSON
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !validDrivers[c.HostConfig.LogConfig.Type] {
|
||||
return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type)
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("stdout", "1")
|
||||
v.Set("stderr", "1")
|
||||
|
||||
if *since != "" {
|
||||
v.Set("since", timeutils.GetTimestamp(*since, time.Now()))
|
||||
}
|
||||
|
||||
if *times {
|
||||
v.Set("timestamps", "1")
|
||||
}
|
||||
|
||||
if *follow {
|
||||
v.Set("follow", "1")
|
||||
}
|
||||
v.Set("tail", *tail)
|
||||
|
||||
sopts := &streamOpts{
|
||||
rawTerminal: c.Config.Tty,
|
||||
out: cli.out,
|
||||
err: cli.err,
|
||||
}
|
||||
|
||||
_, err = cli.stream("GET", "/containers/"+name+"/logs?"+v.Encode(), sopts)
|
||||
return err
|
||||
}
|
381 vendor/github.com/docker/docker/api/client/network.go generated vendored
@@ -1,381 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/daemon/network"
|
||||
"github.com/docker/docker/opts"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
)
|
||||
|
||||
// CmdNetwork is the parent subcommand for all network commands
|
||||
//
|
||||
// Usage: docker network <COMMAND> [OPTIONS]
|
||||
func (cli *DockerCli) CmdNetwork(args ...string) error {
|
||||
cmd := Cli.Subcmd("network", []string{"COMMAND [OPTIONS]"}, networkUsage(), false)
|
||||
cmd.Require(flag.Min, 1)
|
||||
err := cmd.ParseFlags(args, true)
|
||||
cmd.Usage()
|
||||
return err
|
||||
}
|
||||
|
||||
// CmdNetworkCreate creates a new network with a given name
|
||||
//
|
||||
// Usage: docker network create [OPTIONS] <NETWORK-NAME>
|
||||
func (cli *DockerCli) CmdNetworkCreate(args ...string) error {
|
||||
cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false)
|
||||
flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network")
|
||||
flOpts := opts.NewMapOpts(nil, nil)
|
||||
|
||||
flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver")
|
||||
flIpamSubnet := opts.NewListOpts(nil)
|
||||
flIpamIPRange := opts.NewListOpts(nil)
|
||||
flIpamGateway := opts.NewListOpts(nil)
|
||||
flIpamAux := opts.NewMapOpts(nil, nil)
|
||||
|
||||
cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment")
|
||||
cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range")
|
||||
cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet")
|
||||
cmd.Var(flIpamAux, []string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver")
|
||||
cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options")
|
||||
|
||||
cmd.Require(flag.Exact, 1)
|
||||
err := cmd.ParseFlags(args, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the default driver to "" if the user didn't set the value.
|
||||
// That way we can know whether it was user input or not.
|
||||
driver := *flDriver
|
||||
if !cmd.IsSet("-driver") && !cmd.IsSet("d") {
|
||||
driver = ""
|
||||
}
|
||||
|
||||
ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct network create request body
|
||||
nc := types.NetworkCreate{
|
||||
Name: cmd.Arg(0),
|
||||
Driver: driver,
|
||||
IPAM: network.IPAM{Driver: *flIpamDriver, Config: ipamCfg},
|
||||
Options: flOpts.GetAll(),
|
||||
CheckDuplicate: true,
|
||||
}
|
||||
obj, _, err := readBody(cli.call("POST", "/networks/create", nc, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resp types.NetworkCreateResponse
|
||||
err = json.Unmarshal(obj, &resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(cli.out, "%s\n", resp.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CmdNetworkRm deletes a network
|
||||
//
|
||||
// Usage: docker network rm <NETWORK-NAME | NETWORK-ID>
|
||||
func (cli *DockerCli) CmdNetworkRm(args ...string) error {
|
||||
cmd := Cli.Subcmd("network rm", []string{"NETWORK"}, "Deletes a network", false)
|
||||
cmd.Require(flag.Exact, 1)
|
||||
err := cmd.ParseFlags(args, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, _, err = readBody(cli.call("DELETE", "/networks/"+cmd.Arg(0), nil, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CmdNetworkConnect connects a container to a network
|
||||
//
|
||||
// Usage: docker network connect <NETWORK> <CONTAINER>
|
||||
func (cli *DockerCli) CmdNetworkConnect(args ...string) error {
|
||||
cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false)
|
||||
cmd.Require(flag.Exact, 2)
|
||||
err := cmd.ParseFlags(args, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nc := types.NetworkConnect{Container: cmd.Arg(1)}
|
||||
_, _, err = readBody(cli.call("POST", "/networks/"+cmd.Arg(0)+"/connect", nc, nil))
|
||||
return err
|
||||
}
|
||||
|
||||
// CmdNetworkDisconnect disconnects a container from a network
|
||||
//
|
||||
// Usage: docker network disconnect <NETWORK> <CONTAINER>
|
||||
func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error {
|
||||
cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, "Disconnects container from a network", false)
|
||||
cmd.Require(flag.Exact, 2)
|
||||
err := cmd.ParseFlags(args, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nc := types.NetworkConnect{Container: cmd.Arg(1)}
|
||||
_, _, err = readBody(cli.call("POST", "/networks/"+cmd.Arg(0)+"/disconnect", nc, nil))
|
||||
return err
|
||||
}
|
||||
|
||||
// CmdNetworkLs lists all the networks managed by the docker daemon
|
||||
//
|
||||
// Usage: docker network ls [OPTIONS]
|
||||
func (cli *DockerCli) CmdNetworkLs(args ...string) error {
|
||||
cmd := Cli.Subcmd("network ls", nil, "Lists networks", true)
|
||||
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
|
||||
noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output")
|
||||
|
||||
cmd.Require(flag.Exact, 0)
|
||||
err := cmd.ParseFlags(args, true)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obj, _, err := readBody(cli.call("GET", "/networks", nil, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var networkResources []types.NetworkResource
|
||||
err = json.Unmarshal(obj, &networkResources)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
|
||||
|
||||
// unless quiet (-q) is specified, print field titles
|
||||
if !*quiet {
|
||||
fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER")
|
||||
}
|
||||
|
||||
for _, networkResource := range networkResources {
|
||||
ID := networkResource.ID
|
||||
netName := networkResource.Name
|
||||
if !*noTrunc {
|
||||
ID = stringid.TruncateID(ID)
|
||||
}
|
||||
if *quiet {
|
||||
fmt.Fprintln(wr, ID)
|
||||
continue
|
||||
}
|
||||
driver := networkResource.Driver
|
||||
fmt.Fprintf(wr, "%s\t%s\t%s\t",
|
||||
ID,
|
||||
netName,
|
||||
driver)
|
||||
fmt.Fprint(wr, "\n")
|
||||
}
|
||||
wr.Flush()
|
||||
return nil
|
||||
}
|
||||
|
||||
// CmdNetworkInspect inspects the network object for more details
|
||||
//
|
||||
// Usage: docker network inspect [OPTIONS] <NETWORK> [NETWORK...]
|
||||
func (cli *DockerCli) CmdNetworkInspect(args ...string) error {
|
||||
cmd := Cli.Subcmd("network inspect", []string{"NETWORK [NETWORK...]"}, "Displays detailed information on a network", false)
|
||||
cmd.Require(flag.Min, 1)
|
||||
err := cmd.ParseFlags(args, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
status := 0
|
||||
var networks []*types.NetworkResource
|
||||
for _, name := range cmd.Args() {
|
||||
obj, _, err := readBody(cli.call("GET", "/networks/"+name, nil, nil))
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
fmt.Fprintf(cli.err, "Error: No such network: %s\n", name)
|
||||
} else {
|
||||
fmt.Fprintf(cli.err, "%s", err)
|
||||
}
|
||||
status = 1
|
||||
continue
|
||||
}
|
||||
networkResource := types.NetworkResource{}
|
||||
if err := json.NewDecoder(bytes.NewReader(obj)).Decode(&networkResource); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
networks = append(networks, &networkResource)
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(networks, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(cli.out, bytes.NewReader(b)); err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(cli.out, "\n")
|
||||
|
||||
if status != 0 {
|
||||
return Cli.StatusError{StatusCode: status}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Consolidates the ipam configuration as a group from different related configurations
|
||||
// a user can configure a network with multiple non-overlapping subnets, and hence it is
|
||||
// possible to correlate the various related parameters and consolidate them.
|
||||
// consolidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into
|
||||
// structured ipam data.
|
||||
func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) {
|
||||
if len(subnets) < len(ranges) || len(subnets) < len(gateways) {
|
||||
return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet")
|
||||
}
|
||||
iData := map[string]*network.IPAMConfig{}
|
||||
|
||||
// Populate non-overlapping subnets into consolidation map
|
||||
for _, s := range subnets {
|
||||
for k := range iData {
|
||||
ok1, err := subnetMatches(s, k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ok2, err := subnetMatches(k, s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok1 || ok2 {
|
||||
return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported")
|
||||
}
|
||||
}
|
||||
iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}}
|
||||
}
|
||||
|
||||
// Validate and add valid ip ranges
|
||||
for _, r := range ranges {
|
||||
match := false
|
||||
for _, s := range subnets {
|
||||
ok, err := subnetMatches(s, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if iData[s].IPRange != "" {
|
||||
return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s)
|
||||
}
|
||||
d := iData[s]
|
||||
d.IPRange = r
|
||||
match = true
|
||||
}
|
||||
if !match {
|
||||
return nil, fmt.Errorf("no matching subnet for range %s", r)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate and add valid gateways
|
||||
for _, g := range gateways {
|
||||
match := false
|
||||
for _, s := range subnets {
|
||||
ok, err := subnetMatches(s, g)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if iData[s].Gateway != "" {
|
||||
return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s)
|
||||
}
|
||||
d := iData[s]
|
||||
d.Gateway = g
|
||||
match = true
|
||||
}
|
||||
if !match {
|
||||
return nil, fmt.Errorf("no matching subnet for gateway %s", g)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate and add aux-addresses
|
||||
for key, aa := range auxaddrs {
|
||||
match := false
|
||||
for _, s := range subnets {
|
||||
ok, err := subnetMatches(s, aa)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
iData[s].AuxAddress[key] = aa
|
||||
match = true
|
||||
}
|
||||
if !match {
|
||||
return nil, fmt.Errorf("no matching subnet for aux-address %s", aa)
|
||||
}
|
||||
}
|
||||
|
||||
idl := []network.IPAMConfig{}
|
||||
for _, v := range iData {
|
||||
idl = append(idl, *v)
|
||||
}
|
||||
return idl, nil
|
||||
}
|
||||
|
||||
func subnetMatches(subnet, data string) (bool, error) {
|
||||
var (
|
||||
ip net.IP
|
||||
)
|
||||
|
||||
_, s, err := net.ParseCIDR(subnet)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Invalid subnet %s : %v", s, err)
|
||||
}
|
||||
|
||||
if strings.Contains(data, "/") {
|
||||
ip, _, err = net.ParseCIDR(data)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Invalid cidr %s : %v", data, err)
|
||||
}
|
||||
} else {
|
||||
ip = net.ParseIP(data)
|
||||
}
|
||||
|
||||
return s.Contains(ip), nil
|
||||
}
|
||||
|
||||
func networkUsage() string {
|
||||
networkCommands := map[string]string{
|
||||
"create": "Create a network",
|
||||
"connect": "Connect container to a network",
|
||||
"disconnect": "Disconnect container from a network",
|
||||
"inspect": "Display detailed network information",
|
||||
"ls": "List all networks",
|
||||
"rm": "Remove a network",
|
||||
}
|
||||
|
||||
help := "Commands:\n"
|
||||
|
||||
for cmd, description := range networkCommands {
|
||||
help += fmt.Sprintf(" %-25.25s%s\n", cmd, description)
|
||||
}
|
||||
|
||||
help += fmt.Sprintf("\nRun 'docker network COMMAND --help' for more information on a command.")
|
||||
return help
|
||||
}
|
32 vendor/github.com/docker/docker/api/client/pause.go generated vendored
@@ -1,32 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdPause pauses all processes within one or more containers.
|
||||
//
|
||||
// Usage: docker pause CONTAINER [CONTAINER...]
|
||||
func (cli *DockerCli) CmdPause(args ...string) error {
|
||||
cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["pause"].Description, true)
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, nil)); err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
errNames = append(errNames, name)
|
||||
} else {
|
||||
fmt.Fprintf(cli.out, "%s\n", name)
|
||||
}
|
||||
}
|
||||
if len(errNames) > 0 {
|
||||
return fmt.Errorf("Error: failed to pause containers: %v", errNames)
|
||||
}
|
||||
return nil
|
||||
}
|
72 vendor/github.com/docker/docker/api/client/port.go generated vendored
@@ -1,72 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/nat"
|
||||
)
|
||||
|
||||
// CmdPort lists port mappings for a container.
|
||||
// If a private port is specified, it also shows the public-facing port that is NATed to the private port.
|
||||
//
|
||||
// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]]
|
||||
func (cli *DockerCli) CmdPort(args ...string) error {
|
||||
cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, Cli.DockerCommands["port"].Description, true)
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var c struct {
|
||||
NetworkSettings struct {
|
||||
Ports nat.PortMap
|
||||
}
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cmd.NArg() == 2 {
|
||||
var (
|
||||
port = cmd.Arg(1)
|
||||
proto = "tcp"
|
||||
parts = strings.SplitN(port, "/", 2)
|
||||
)
|
||||
|
||||
if len(parts) == 2 && len(parts[1]) != 0 {
|
||||
port = parts[0]
|
||||
proto = parts[1]
|
||||
}
|
||||
natPort := port + "/" + proto
|
||||
newP, err := nat.NewPort(proto, port)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil {
|
||||
for _, frontend := range frontends {
|
||||
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0))
|
||||
}
|
||||
|
||||
for from, frontends := range c.NetworkSettings.Ports {
|
||||
for _, frontend := range frontends {
|
||||
fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
116 vendor/github.com/docker/docker/api/client/ps.go generated vendored
@@ -1,116 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/docker/api/client/ps"
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/opts"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers/filters"
|
||||
)
|
||||
|
||||
// CmdPs outputs a list of Docker containers.
|
||||
//
|
||||
// Usage: docker ps [OPTIONS]
|
||||
func (cli *DockerCli) CmdPs(args ...string) error {
|
||||
var (
|
||||
err error
|
||||
|
||||
psFilterArgs = filters.Args{}
|
||||
v = url.Values{}
|
||||
|
||||
cmd = Cli.Subcmd("ps", nil, Cli.DockerCommands["ps"].Description, true)
|
||||
quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
|
||||
size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes")
|
||||
all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
|
||||
noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
|
||||
nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running")
|
||||
since = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running")
|
||||
before = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name")
|
||||
last = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running")
|
||||
format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template")
|
||||
flFilter = opts.NewListOpts(nil)
|
||||
)
|
||||
cmd.Require(flag.Exact, 0)
|
||||
|
||||
cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
if *last == -1 && *nLatest {
|
||||
*last = 1
|
||||
}
|
||||
|
||||
if *all {
|
||||
v.Set("all", "1")
|
||||
}
|
||||
|
||||
if *last != -1 {
|
||||
v.Set("limit", strconv.Itoa(*last))
|
||||
}
|
||||
|
||||
if *since != "" {
|
||||
v.Set("since", *since)
|
||||
}
|
||||
|
||||
if *before != "" {
|
||||
v.Set("before", *before)
|
||||
}
|
||||
|
||||
if *size {
|
||||
v.Set("size", "1")
|
||||
}
|
||||
|
||||
// Consolidate all filter flags, and sanity check them.
|
||||
// They'll get processed in the daemon/server.
|
||||
for _, f := range flFilter.GetAll() {
|
||||
if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(psFilterArgs) > 0 {
|
||||
filterJSON, err := filters.ToParam(psFilterArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v.Set("filters", filterJSON)
|
||||
}
|
||||
|
||||
serverResp, err := cli.call("GET", "/containers/json?"+v.Encode(), nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
containers := []types.Container{}
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&containers); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f := *format
|
||||
if len(f) == 0 {
|
||||
if len(cli.PsFormat()) > 0 && !*quiet {
|
||||
f = cli.PsFormat()
|
||||
} else {
|
||||
f = "table"
|
||||
}
|
||||
}
|
||||
|
||||
psCtx := ps.Context{
|
||||
Output: cli.out,
|
||||
Format: f,
|
||||
Quiet: *quiet,
|
||||
Size: *size,
|
||||
Trunc: !*noTrunc,
|
||||
}
|
||||
|
||||
ps.Format(psCtx, containers)
|
||||
|
||||
return nil
|
||||
}
|
160 vendor/github.com/docker/docker/api/client/ps/custom.go generated vendored
@@ -1,160 +0,0 @@
|
||||
package ps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/stringutils"
|
||||
"github.com/docker/docker/pkg/units"
|
||||
)
|
||||
|
||||
const (
|
||||
tableKey = "table"
|
||||
|
||||
idHeader = "CONTAINER ID"
|
||||
imageHeader = "IMAGE"
|
||||
namesHeader = "NAMES"
|
||||
commandHeader = "COMMAND"
|
||||
createdAtHeader = "CREATED AT"
|
||||
runningForHeader = "CREATED"
|
||||
statusHeader = "STATUS"
|
||||
portsHeader = "PORTS"
|
||||
sizeHeader = "SIZE"
|
||||
labelsHeader = "LABELS"
|
||||
)
|
||||
|
||||
type containerContext struct {
|
||||
trunc bool
|
||||
header []string
|
||||
c types.Container
|
||||
}
|
||||
|
||||
func (c *containerContext) ID() string {
|
||||
c.addHeader(idHeader)
|
||||
if c.trunc {
|
||||
return stringid.TruncateID(c.c.ID)
|
||||
}
|
||||
return c.c.ID
|
||||
}
|
||||
|
||||
func (c *containerContext) Names() string {
|
||||
c.addHeader(namesHeader)
|
||||
names := stripNamePrefix(c.c.Names)
|
||||
if c.trunc {
|
||||
for _, name := range names {
|
||||
if len(strings.Split(name, "/")) == 1 {
|
||||
names = []string{name}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.Join(names, ",")
|
||||
}
|
||||
|
||||
func (c *containerContext) Image() string {
|
||||
c.addHeader(imageHeader)
|
||||
if c.c.Image == "" {
|
||||
return "<no image>"
|
||||
}
|
||||
if c.trunc {
|
||||
if stringid.TruncateID(c.c.ImageID) == stringid.TruncateID(c.c.Image) {
|
||||
return stringutils.Truncate(c.c.Image, 12)
|
||||
}
|
||||
}
|
||||
return c.c.Image
|
||||
}
|
||||
|
||||
func (c *containerContext) Command() string {
|
||||
c.addHeader(commandHeader)
|
||||
command := c.c.Command
|
||||
if c.trunc {
|
||||
command = stringutils.Truncate(command, 20)
|
||||
}
|
||||
return strconv.Quote(command)
|
||||
}
|
||||
|
||||
func (c *containerContext) CreatedAt() string {
|
||||
c.addHeader(createdAtHeader)
|
||||
return time.Unix(int64(c.c.Created), 0).String()
|
||||
}
|
||||
|
||||
func (c *containerContext) RunningFor() string {
|
||||
c.addHeader(runningForHeader)
|
||||
createdAt := time.Unix(int64(c.c.Created), 0)
|
||||
return units.HumanDuration(time.Now().UTC().Sub(createdAt))
|
||||
}
|
||||
|
||||
func (c *containerContext) Ports() string {
|
||||
c.addHeader(portsHeader)
|
||||
return api.DisplayablePorts(c.c.Ports)
|
||||
}
|
||||
|
||||
func (c *containerContext) Status() string {
|
||||
c.addHeader(statusHeader)
|
||||
return c.c.Status
|
||||
}
|
||||
|
||||
func (c *containerContext) Size() string {
|
||||
c.addHeader(sizeHeader)
|
||||
srw := units.HumanSize(float64(c.c.SizeRw))
|
||||
sv := units.HumanSize(float64(c.c.SizeRootFs))
|
||||
|
||||
sf := srw
|
||||
if c.c.SizeRootFs > 0 {
|
||||
sf = fmt.Sprintf("%s (virtual %s)", srw, sv)
|
||||
}
|
||||
return sf
|
||||
}
|
||||
|
||||
func (c *containerContext) Labels() string {
|
||||
c.addHeader(labelsHeader)
|
||||
if c.c.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
var joinLabels []string
|
||||
for k, v := range c.c.Labels {
|
||||
joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
return strings.Join(joinLabels, ",")
|
||||
}
|
||||
|
||||
func (c *containerContext) Label(name string) string {
|
||||
n := strings.Split(name, ".")
|
||||
r := strings.NewReplacer("-", " ", "_", " ")
|
||||
h := r.Replace(n[len(n)-1])
|
||||
|
||||
c.addHeader(h)
|
||||
|
||||
if c.c.Labels == nil {
|
||||
return ""
|
||||
}
|
||||
return c.c.Labels[name]
|
||||
}
|
||||
|
||||
func (c *containerContext) fullHeader() string {
|
||||
if c.header == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(c.header, "\t")
|
||||
}
|
||||
|
||||
func (c *containerContext) addHeader(header string) {
|
||||
if c.header == nil {
|
||||
c.header = []string{}
|
||||
}
|
||||
c.header = append(c.header, strings.ToUpper(header))
|
||||
}
|
||||
|
||||
func stripNamePrefix(ss []string) []string {
|
||||
for i, s := range ss {
|
||||
ss[i] = s[1:]
|
||||
}
|
||||
|
||||
return ss
|
||||
}
|
126 vendor/github.com/docker/docker/api/client/ps/custom_test.go generated vendored
@@ -1,126 +0,0 @@
|
||||
package ps
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
)
|
||||
|
||||
func TestContainerPsContext(t *testing.T) {
|
||||
containerID := stringid.GenerateRandomID()
|
||||
unix := time.Now().Unix()
|
||||
|
||||
var ctx containerContext
|
||||
cases := []struct {
|
||||
container types.Container
|
||||
trunc bool
|
||||
expValue string
|
||||
expHeader string
|
||||
call func() string
|
||||
}{
|
||||
{types.Container{ID: containerID}, true, stringid.TruncateID(containerID), idHeader, ctx.ID},
|
||||
{types.Container{ID: containerID}, false, containerID, idHeader, ctx.ID},
|
||||
{types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names},
|
||||
{types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image},
|
||||
{types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image},
|
||||
{types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image},
|
||||
{types.Container{
|
||||
Image: "a5a665ff33eced1e0803148700880edab4",
|
||||
ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5",
|
||||
},
|
||||
true,
|
||||
"a5a665ff33ec",
|
||||
imageHeader,
|
||||
ctx.Image,
|
||||
},
|
||||
{types.Container{
|
||||
Image: "a5a665ff33eced1e0803148700880edab4",
|
||||
ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5",
|
||||
},
|
||||
false,
|
||||
"a5a665ff33eced1e0803148700880edab4",
|
||||
imageHeader,
|
||||
ctx.Image,
|
||||
},
|
||||
{types.Container{Image: ""}, true, "<no image>", imageHeader, ctx.Image},
|
||||
{types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command},
|
||||
{types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt},
|
||||
{types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports},
|
||||
{types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status},
|
||||
{types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size},
|
||||
{types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size},
|
||||
{types.Container{}, true, "", labelsHeader, ctx.Labels},
|
||||
{types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels},
|
||||
{types.Container{Created: unix}, true, "Less than a second", runningForHeader, ctx.RunningFor},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
ctx = containerContext{c: c.container, trunc: c.trunc}
|
||||
v := c.call()
|
||||
if strings.Contains(v, ",") {
|
||||
// comma-separated values probably mean a map input, which won't
|
||||
// be guaranteed to have the same order as our expected value
|
||||
// We'll create maps and use reflect.DeepEqual to check instead:
|
||||
entriesMap := make(map[string]string)
|
||||
expMap := make(map[string]string)
|
||||
entries := strings.Split(v, ",")
|
||||
expectedEntries := strings.Split(c.expValue, ",")
|
||||
for _, entry := range entries {
|
||||
keyval := strings.Split(entry, "=")
|
||||
entriesMap[keyval[0]] = keyval[1]
|
||||
}
|
||||
for _, expected := range expectedEntries {
|
||||
keyval := strings.Split(expected, "=")
|
||||
expMap[keyval[0]] = keyval[1]
|
||||
}
|
||||
if !reflect.DeepEqual(expMap, entriesMap) {
|
||||
t.Fatalf("Expected entries: %v, got: %v", c.expValue, v)
|
||||
}
|
||||
} else if v != c.expValue {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expValue, v)
|
||||
}
|
||||
|
||||
h := ctx.fullHeader()
|
||||
if h != c.expHeader {
|
||||
t.Fatalf("Expected %s, was %s\n", c.expHeader, h)
|
||||
}
|
||||
}
|
||||
|
||||
c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}}
|
||||
ctx = containerContext{c: c1, trunc: true}
|
||||
|
||||
sid := ctx.Label("com.docker.swarm.swarm-id")
|
||||
node := ctx.Label("com.docker.swarm.node_name")
|
||||
if sid != "33" {
|
||||
t.Fatalf("Expected 33, was %s\n", sid)
|
||||
}
|
||||
|
||||
if node != "ubuntu" {
|
||||
t.Fatalf("Expected ubuntu, was %s\n", node)
|
||||
}
|
||||
|
||||
h := ctx.fullHeader()
|
||||
if h != "SWARM ID\tNODE NAME" {
|
||||
t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h)
|
||||
|
||||
}
|
||||
|
||||
c2 := types.Container{}
|
||||
ctx = containerContext{c: c2, trunc: true}
|
||||
|
||||
label := ctx.Label("anything.really")
|
||||
if label != "" {
|
||||
t.Fatalf("Expected an empty string, was %s", label)
|
||||
}
|
||||
|
||||
ctx = containerContext{c: c2, trunc: true}
|
||||
fullHeader := ctx.fullHeader()
|
||||
if fullHeader != "" {
|
||||
t.Fatalf("Expected fullHeader to be empty, was %s", fullHeader)
|
||||
}
|
||||
|
||||
}
|
140 vendor/github.com/docker/docker/api/client/ps/formatter.go generated vendored
@@ -1,140 +0,0 @@
|
||||
package ps

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"text/tabwriter"
	"text/template"

	"github.com/docker/docker/api/types"
)

const (
	tableFormatKey = "table"
	rawFormatKey   = "raw"

	defaultTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}"
	defaultQuietFormat = "{{.ID}}"
)

// Context contains information required by the formatter to print the output as desired.
type Context struct {
	// Output is the output stream to which the formatted string is written.
	Output io.Writer
	// Format is used to choose raw, table or custom format for the output.
	Format string
	// Size when set to true will display the size of the output.
	Size bool
	// Quiet when set to true will simply print minimal information.
	Quiet bool
	// Trunc when set to true will truncate the output of certain fields such as Container ID.
	Trunc bool
}

// Format helps to format the output using the parameters set in the Context.
// Currently Format allows the output to be displayed in raw, table or custom format.
func Format(ctx Context, containers []types.Container) {
	switch ctx.Format {
	case tableFormatKey:
		tableFormat(ctx, containers)
	case rawFormatKey:
		rawFormat(ctx, containers)
	default:
		customFormat(ctx, containers)
	}
}

func rawFormat(ctx Context, containers []types.Container) {
	if ctx.Quiet {
		ctx.Format = `container_id: {{.ID}}`
	} else {
		ctx.Format = `container_id: {{.ID}}
image: {{.Image}}
command: {{.Command}}
created_at: {{.CreatedAt}}
status: {{.Status}}
names: {{.Names}}
labels: {{.Labels}}
ports: {{.Ports}}
`
		if ctx.Size {
			ctx.Format += `size: {{.Size}}
`
		}
	}

	customFormat(ctx, containers)
}

func tableFormat(ctx Context, containers []types.Container) {
	ctx.Format = defaultTableFormat
	if ctx.Quiet {
		ctx.Format = defaultQuietFormat
	}

	customFormat(ctx, containers)
}

func customFormat(ctx Context, containers []types.Container) {
	var (
		table  bool
		header string
		format = ctx.Format
		buffer = bytes.NewBufferString("")
	)

	if strings.HasPrefix(ctx.Format, tableFormatKey) {
		table = true
		format = format[len(tableFormatKey):]
	}

	format = strings.Trim(format, " ")
	r := strings.NewReplacer(`\t`, "\t", `\n`, "\n")
	format = r.Replace(format)

	if table && ctx.Size {
		format += "\t{{.Size}}"
	}

	tmpl, err := template.New("").Parse(format)
	if err != nil {
		buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err))
		buffer.WriteTo(ctx.Output)
		return
	}

	for _, container := range containers {
		containerCtx := &containerContext{
			trunc: ctx.Trunc,
			c:     container,
		}
		if err := tmpl.Execute(buffer, containerCtx); err != nil {
			buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err))
			buffer.WriteTo(ctx.Output)
			return
		}
		if table && len(header) == 0 {
			header = containerCtx.fullHeader()
		}
		buffer.WriteString("\n")
	}

	if table {
		if len(header) == 0 {
			// if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template
			containerCtx := &containerContext{}
			tmpl.Execute(bytes.NewBufferString(""), containerCtx)
			header = containerCtx.fullHeader()
		}

		t := tabwriter.NewWriter(ctx.Output, 20, 1, 3, ' ', 0)
		t.Write([]byte(header))
		t.Write([]byte("\n"))
		buffer.WriteTo(t)
		t.Flush()
	} else {
		buffer.WriteTo(ctx.Output)
	}
}
208
vendor/github.com/docker/docker/api/client/ps/formatter_test.go
generated
vendored
@@ -1,208 +0,0 @@
package ps
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
|
||||
func TestFormat(t *testing.T) {
|
||||
contexts := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
// Errors
|
||||
{
|
||||
Context{
|
||||
Format: "{{InvalidFunction}}",
|
||||
},
|
||||
`Template parsing error: template: :1: function "InvalidFunction" not defined
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "{{nil}}",
|
||||
},
|
||||
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
|
||||
`,
|
||||
},
|
||||
// Table Format
|
||||
{
|
||||
Context{
|
||||
Format: "table",
|
||||
},
|
||||
`CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
containerID1 ubuntu "" 45 years ago foobar_baz
|
||||
containerID2 ubuntu "" 45 years ago foobar_bar
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Image}}",
|
||||
},
|
||||
"IMAGE\nubuntu\nubuntu\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Image}}",
|
||||
Size: true,
|
||||
},
|
||||
"IMAGE SIZE\nubuntu 0 B\nubuntu 0 B\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Image}}",
|
||||
Quiet: true,
|
||||
},
|
||||
"IMAGE\nubuntu\nubuntu\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table",
|
||||
Quiet: true,
|
||||
},
|
||||
"containerID1\ncontainerID2\n",
|
||||
},
|
||||
// Raw Format
|
||||
{
|
||||
Context{
|
||||
Format: "raw",
|
||||
},
|
||||
`container_id: containerID1
|
||||
image: ubuntu
|
||||
command: ""
|
||||
created_at: 1970-01-01 00:00:00 +0000 UTC
|
||||
status:
|
||||
names: foobar_baz
|
||||
labels:
|
||||
ports:
|
||||
|
||||
container_id: containerID2
|
||||
image: ubuntu
|
||||
command: ""
|
||||
created_at: 1970-01-01 00:00:00 +0000 UTC
|
||||
status:
|
||||
names: foobar_bar
|
||||
labels:
|
||||
ports:
|
||||
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "raw",
|
||||
Size: true,
|
||||
},
|
||||
`container_id: containerID1
|
||||
image: ubuntu
|
||||
command: ""
|
||||
created_at: 1970-01-01 00:00:00 +0000 UTC
|
||||
status:
|
||||
names: foobar_baz
|
||||
labels:
|
||||
ports:
|
||||
size: 0 B
|
||||
|
||||
container_id: containerID2
|
||||
image: ubuntu
|
||||
command: ""
|
||||
created_at: 1970-01-01 00:00:00 +0000 UTC
|
||||
status:
|
||||
names: foobar_bar
|
||||
labels:
|
||||
ports:
|
||||
size: 0 B
|
||||
|
||||
`,
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "raw",
|
||||
Quiet: true,
|
||||
},
|
||||
"container_id: containerID1\ncontainer_id: containerID2\n",
|
||||
},
|
||||
// Custom Format
|
||||
{
|
||||
Context{
|
||||
Format: "{{.Image}}",
|
||||
},
|
||||
"ubuntu\nubuntu\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "{{.Image}}",
|
||||
Size: true,
|
||||
},
|
||||
"ubuntu\nubuntu\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, context := range contexts {
|
||||
containers := []types.Container{
|
||||
{ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"},
|
||||
{ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"},
|
||||
}
|
||||
out := bytes.NewBufferString("")
|
||||
context.context.Output = out
|
||||
Format(context.context, containers)
|
||||
actual := out.String()
|
||||
if actual != context.expected {
|
||||
t.Fatalf("Expected \n%s, got \n%s", context.expected, actual)
|
||||
}
|
||||
// Clean buffer
|
||||
out.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustomFormatNoContainers(t *testing.T) {
|
||||
out := bytes.NewBufferString("")
|
||||
containers := []types.Container{}
|
||||
|
||||
contexts := []struct {
|
||||
context Context
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
Context{
|
||||
Format: "{{.Image}}",
|
||||
Output: out,
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Image}}",
|
||||
Output: out,
|
||||
},
|
||||
"IMAGE\n",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "{{.Image}}",
|
||||
Output: out,
|
||||
Size: true,
|
||||
},
|
||||
"",
|
||||
},
|
||||
{
|
||||
Context{
|
||||
Format: "table {{.Image}}",
|
||||
Output: out,
|
||||
Size: true,
|
||||
},
|
||||
"IMAGE SIZE\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, context := range contexts {
|
||||
customFormat(context.context, containers)
|
||||
actual := out.String()
|
||||
if actual != context.expected {
|
||||
t.Fatalf("Expected \n%s, got \n%s", context.expected, actual)
|
||||
}
|
||||
// Clean buffer
|
||||
out.Reset()
|
||||
}
|
||||
}
|
53
vendor/github.com/docker/docker/api/client/pull.go
generated
vendored
@@ -1,53 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/graph/tags"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/registry"
|
||||
)
|
||||
|
||||
// CmdPull pulls an image or a repository from the registry.
|
||||
//
|
||||
// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST]
|
||||
func (cli *DockerCli) CmdPull(args ...string) error {
|
||||
cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, Cli.DockerCommands["pull"].Description, true)
|
||||
allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository")
|
||||
addTrustedFlags(cmd, true)
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
remote := cmd.Arg(0)
|
||||
|
||||
taglessRemote, tag := parsers.ParseRepositoryTag(remote)
|
||||
if tag == "" && !*allTags {
|
||||
tag = tags.DefaultTag
|
||||
fmt.Fprintf(cli.out, "Using default tag: %s\n", tag)
|
||||
} else if tag != "" && *allTags {
|
||||
return fmt.Errorf("tag can't be used with --all-tags/-a")
|
||||
}
|
||||
|
||||
ref := registry.ParseReference(tag)
|
||||
|
||||
// Resolve the Repository name from fqn to RepositoryInfo
|
||||
repoInfo, err := registry.ParseRepositoryInfo(taglessRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isTrusted() && !ref.HasDigest() {
|
||||
// Check if tag is digest
|
||||
authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index)
|
||||
return cli.trustedPull(repoInfo, ref, authConfig)
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("fromImage", ref.ImageName(taglessRemote))
|
||||
|
||||
_, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull")
|
||||
return err
|
||||
}
|
53
vendor/github.com/docker/docker/api/client/push.go
generated
vendored
@@ -1,53 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/registry"
|
||||
)
|
||||
|
||||
// CmdPush pushes an image or repository to the registry.
|
||||
//
|
||||
// Usage: docker push NAME[:TAG]
|
||||
func (cli *DockerCli) CmdPush(args ...string) error {
|
||||
cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, Cli.DockerCommands["push"].Description, true)
|
||||
addTrustedFlags(cmd, false)
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
remote, tag := parsers.ParseRepositoryTag(cmd.Arg(0))
|
||||
|
||||
// Resolve the Repository name from fqn to RepositoryInfo
|
||||
repoInfo, err := registry.ParseRepositoryInfo(remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Resolve the Auth config relevant for this server
|
||||
authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index)
|
||||
// If we're not using a custom registry, we know the restrictions
|
||||
// applied to repository names and can warn the user in advance.
|
||||
// Custom repositories can have different rules, and we must also
|
||||
// allow pushing by image ID.
|
||||
if repoInfo.Official {
|
||||
username := authConfig.Username
|
||||
if username == "" {
|
||||
username = "<user>"
|
||||
}
|
||||
return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository to <user>/<repo> (ex: %s/%s)", username, repoInfo.LocalName)
|
||||
}
|
||||
|
||||
if isTrusted() {
|
||||
return cli.trustedPush(repoInfo, tag, authConfig)
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("tag", tag)
|
||||
|
||||
_, _, err = cli.clientRequestAttemptLogin("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, repoInfo.Index, "push")
|
||||
return err
|
||||
}
|
32
vendor/github.com/docker/docker/api/client/rename.go
generated
vendored
@@ -1,32 +0,0 @@
package client

import (
	"fmt"
	"strings"

	Cli "github.com/docker/docker/cli"
	flag "github.com/docker/docker/pkg/mflag"
)

// CmdRename renames a container.
//
// Usage: docker rename OLD_NAME NEW_NAME
func (cli *DockerCli) CmdRename(args ...string) error {
	cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, Cli.DockerCommands["rename"].Description, true)
	cmd.Require(flag.Exact, 2)

	cmd.ParseFlags(args, true)

	oldName := strings.TrimSpace(cmd.Arg(0))
	newName := strings.TrimSpace(cmd.Arg(1))

	if oldName == "" || newName == "" {
		return fmt.Errorf("Error: Neither old nor new names may be empty")
	}

	if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/rename?name=%s", oldName, newName), nil, nil)); err != nil {
		fmt.Fprintf(cli.err, "%s\n", err)
		return fmt.Errorf("Error: failed to rename container named %s", oldName)
	}
	return nil
}
39
vendor/github.com/docker/docker/api/client/restart.go
generated
vendored
@@ -1,39 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdRestart restarts one or more containers.
|
||||
//
|
||||
// Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...]
|
||||
func (cli *DockerCli) CmdRestart(args ...string) error {
|
||||
cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["restart"].Description, true)
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("t", strconv.Itoa(*nSeconds))
|
||||
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, nil))
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
errNames = append(errNames, name)
|
||||
} else {
|
||||
fmt.Fprintf(cli.out, "%s\n", name)
|
||||
}
|
||||
}
|
||||
if len(errNames) > 0 {
|
||||
return fmt.Errorf("Error: failed to restart containers: %v", errNames)
|
||||
}
|
||||
return nil
|
||||
}
|
55
vendor/github.com/docker/docker/api/client/rm.go
generated
vendored
@@ -1,55 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdRm removes one or more containers.
|
||||
//
|
||||
// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...]
|
||||
func (cli *DockerCli) CmdRm(args ...string) error {
|
||||
cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["rm"].Description, true)
|
||||
v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
|
||||
link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link")
|
||||
force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
val := url.Values{}
|
||||
if *v {
|
||||
val.Set("v", "1")
|
||||
}
|
||||
if *link {
|
||||
val.Set("link", "1")
|
||||
}
|
||||
|
||||
if *force {
|
||||
val.Set("force", "1")
|
||||
}
|
||||
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
if name == "" {
|
||||
return fmt.Errorf("Container name cannot be empty")
|
||||
}
|
||||
name = strings.Trim(name, "/")
|
||||
|
||||
_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil))
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
errNames = append(errNames, name)
|
||||
} else {
|
||||
fmt.Fprintf(cli.out, "%s\n", name)
|
||||
}
|
||||
}
|
||||
if len(errNames) > 0 {
|
||||
return fmt.Errorf("Error: failed to remove containers: %v", errNames)
|
||||
}
|
||||
return nil
|
||||
}
|
61
vendor/github.com/docker/docker/api/client/rmi.go
generated
vendored
@@ -1,61 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdRmi removes all images with the specified name(s).
|
||||
//
|
||||
// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...]
|
||||
func (cli *DockerCli) CmdRmi(args ...string) error {
|
||||
cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["rmi"].Description, true)
|
||||
force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image")
|
||||
noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
v := url.Values{}
|
||||
if *force {
|
||||
v.Set("force", "1")
|
||||
}
|
||||
if *noprune {
|
||||
v.Set("noprune", "1")
|
||||
}
|
||||
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
serverResp, err := cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
errNames = append(errNames, name)
|
||||
} else {
|
||||
defer serverResp.body.Close()
|
||||
|
||||
dels := []types.ImageDelete{}
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&dels); err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
errNames = append(errNames, name)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, del := range dels {
|
||||
if del.Deleted != "" {
|
||||
fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted)
|
||||
} else {
|
||||
fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(errNames) > 0 {
|
||||
return fmt.Errorf("Error: failed to remove images: %v", errNames)
|
||||
}
|
||||
return nil
|
||||
}
|
284
vendor/github.com/docker/docker/api/client/run.go
generated
vendored
@@ -1,284 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
derr "github.com/docker/docker/errors"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/promise"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/docker/docker/runconfig"
|
||||
"github.com/docker/libnetwork/resolvconf/dns"
|
||||
)
|
||||
|
||||
func (cid *cidFile) Close() error {
|
||||
cid.file.Close()
|
||||
|
||||
if !cid.written {
|
||||
if err := os.Remove(cid.path); err != nil {
|
||||
return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cid *cidFile) Write(id string) error {
|
||||
if _, err := cid.file.Write([]byte(id)); err != nil {
|
||||
return fmt.Errorf("Failed to write the container ID to the file: %s", err)
|
||||
}
|
||||
cid.written = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// if container start fails with 'command not found' error, return 127
|
||||
// if container start fails with 'command cannot be invoked' error, return 126
|
||||
// return 125 for generic docker daemon failures
|
||||
func runStartContainerErr(err error) error {
|
||||
trimmedErr := strings.Trim(err.Error(), "Error response from daemon: ")
|
||||
statusError := Cli.StatusError{}
|
||||
derrCmdNotFound := derr.ErrorCodeCmdNotFound.Message()
|
||||
derrCouldNotInvoke := derr.ErrorCodeCmdCouldNotBeInvoked.Message()
|
||||
derrNoSuchImage := derr.ErrorCodeNoSuchImageHash.Message()
|
||||
derrNoSuchImageTag := derr.ErrorCodeNoSuchImageTag.Message()
|
||||
switch trimmedErr {
|
||||
case derrCmdNotFound:
|
||||
statusError = Cli.StatusError{StatusCode: 127}
|
||||
case derrCouldNotInvoke:
|
||||
statusError = Cli.StatusError{StatusCode: 126}
|
||||
case derrNoSuchImage, derrNoSuchImageTag:
|
||||
statusError = Cli.StatusError{StatusCode: 125}
|
||||
default:
|
||||
statusError = Cli.StatusError{StatusCode: 125}
|
||||
}
|
||||
return statusError
|
||||
}
|
||||
|
||||
// CmdRun runs a command in a new container.
|
||||
//
|
||||
// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
|
||||
func (cli *DockerCli) CmdRun(args ...string) error {
|
||||
cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true)
|
||||
addTrustedFlags(cmd, true)
|
||||
|
||||
// These are flags not stored in Config/HostConfig
|
||||
var (
|
||||
flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits")
|
||||
flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID")
|
||||
flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process")
|
||||
flName = cmd.String([]string{"-name"}, "", "Assign a name to the container")
|
||||
flAttach *opts.ListOpts
|
||||
|
||||
ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d")
|
||||
ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
|
||||
ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d")
|
||||
)
|
||||
|
||||
config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
|
||||
// just in case the Parse does not exit
|
||||
if err != nil {
|
||||
cmd.ReportError(err.Error(), true)
|
||||
os.Exit(125)
|
||||
}
|
||||
|
||||
if len(hostConfig.DNS) > 0 {
|
||||
// check the DNS settings passed via --dns against
|
||||
// localhost regexp to warn if they are trying to
|
||||
// set a DNS to a localhost address
|
||||
for _, dnsIP := range hostConfig.DNS {
|
||||
if dns.IsLocalhost(dnsIP) {
|
||||
fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if config.Image == "" {
|
||||
cmd.Usage()
|
||||
return nil
|
||||
}
|
||||
|
||||
if !*flDetach {
|
||||
if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if fl := cmd.Lookup("-attach"); fl != nil {
|
||||
flAttach = fl.Value.(*opts.ListOpts)
|
||||
if flAttach.Len() != 0 {
|
||||
return ErrConflictAttachDetach
|
||||
}
|
||||
}
|
||||
if *flAutoRemove {
|
||||
return ErrConflictDetachAutoRemove
|
||||
}
|
||||
|
||||
config.AttachStdin = false
|
||||
config.AttachStdout = false
|
||||
config.AttachStderr = false
|
||||
config.StdinOnce = false
|
||||
}
|
||||
|
||||
// Disable flSigProxy when in TTY mode
|
||||
sigProxy := *flSigProxy
|
||||
if config.Tty {
|
||||
sigProxy = false
|
||||
}
|
||||
|
||||
// Telling the Windows daemon the initial size of the tty during start makes
|
||||
// a far better user experience rather than relying on subsequent resizes
|
||||
// to cause things to catch up.
|
||||
if runtime.GOOS == "windows" {
|
||||
hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize()
|
||||
}
|
||||
|
||||
createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
|
||||
if err != nil {
|
||||
cmd.ReportError(err.Error(), true)
|
||||
return runStartContainerErr(err)
|
||||
}
|
||||
if sigProxy {
|
||||
sigc := cli.forwardAllSignals(createResponse.ID)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
var (
|
||||
waitDisplayID chan struct{}
|
||||
errCh chan error
|
||||
)
|
||||
if !config.AttachStdout && !config.AttachStderr {
|
||||
// Make this asynchronous to allow the client to write to stdin before having to read the ID
|
||||
waitDisplayID = make(chan struct{})
|
||||
go func() {
|
||||
defer close(waitDisplayID)
|
||||
fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
|
||||
}()
|
||||
}
|
||||
if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
|
||||
return ErrConflictRestartPolicyAndAutoRemove
|
||||
}
|
||||
// We need to instantiate the chan because the select needs it. It can
|
||||
// be closed but can't be uninitialized.
|
||||
hijacked := make(chan io.Closer)
|
||||
// Block the return until the chan gets closed
|
||||
defer func() {
|
||||
logrus.Debugf("End of CmdRun(), Waiting for hijack to finish.")
|
||||
if _, ok := <-hijacked; ok {
|
||||
fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
|
||||
}
|
||||
}()
|
||||
if config.AttachStdin || config.AttachStdout || config.AttachStderr {
|
||||
var (
|
||||
out, stderr io.Writer
|
||||
in io.ReadCloser
|
||||
v = url.Values{}
|
||||
)
|
||||
v.Set("stream", "1")
|
||||
if config.AttachStdin {
|
||||
v.Set("stdin", "1")
|
||||
in = cli.in
|
||||
}
|
||||
if config.AttachStdout {
|
||||
v.Set("stdout", "1")
|
||||
out = cli.out
|
||||
}
|
||||
if config.AttachStderr {
|
||||
v.Set("stderr", "1")
|
||||
if config.Tty {
|
||||
stderr = cli.out
|
||||
} else {
|
||||
stderr = cli.err
|
||||
}
|
||||
}
|
||||
errCh = promise.Go(func() error {
|
||||
return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
|
||||
})
|
||||
} else {
|
||||
close(hijacked)
|
||||
}
|
||||
// Acknowledge the hijack before starting
|
||||
select {
|
||||
case closer := <-hijacked:
|
||||
// Make sure that the hijack gets closed when returning (results
|
||||
// in closing the hijack chan and freeing server's goroutines)
|
||||
if closer != nil {
|
||||
defer closer.Close()
|
||||
}
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
logrus.Debugf("Error hijack: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if *flAutoRemove {
|
||||
if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil {
|
||||
fmt.Fprintf(cli.err, "Error deleting container: %s\n", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
//start the container
|
||||
if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil {
|
||||
cmd.ReportError(err.Error(), false)
|
||||
return runStartContainerErr(err)
|
||||
}
|
||||
|
||||
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
|
||||
if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
|
||||
fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if errCh != nil {
|
||||
if err := <-errCh; err != nil {
|
||||
logrus.Debugf("Error hijack: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Detached mode: wait for the id to be displayed and return.
|
||||
if !config.AttachStdout && !config.AttachStderr {
|
||||
// Detached mode
|
||||
<-waitDisplayID
|
||||
return nil
|
||||
}
|
||||
|
||||
var status int
|
||||
|
||||
// Attached mode
|
||||
if *flAutoRemove {
|
||||
// Autoremove: wait for the container to finish, retrieve
|
||||
// the exit code and remove the container
|
||||
if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, nil)); err != nil {
|
||||
return runStartContainerErr(err)
|
||||
}
|
||||
if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// No Autoremove: Simply retrieve the exit code
|
||||
if !config.Tty {
|
||||
// In non-TTY mode, we can't detach, so we must wait for container exit
|
||||
if status, err = waitForExit(cli, createResponse.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// In TTY mode, there is a race: if the process dies too slowly, the state could
|
||||
// be updated after the getExitCode call and result in the wrong exit code being reported
|
||||
if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if status != 0 {
|
||||
return Cli.StatusError{StatusCode: status}
|
||||
}
|
||||
return nil
|
||||
}
|
52
vendor/github.com/docker/docker/api/client/save.go
generated
vendored
@@ -1,52 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdSave saves one or more images to a tar archive.
|
||||
//
|
||||
// The tar archive is written to STDOUT by default, or written to a file.
|
||||
//
|
||||
// Usage: docker save [OPTIONS] IMAGE [IMAGE...]
|
||||
func (cli *DockerCli) CmdSave(args ...string) error {
|
||||
cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["save"].Description+" (streamed to STDOUT by default)", true)
|
||||
outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
output = cli.out
|
||||
err error
|
||||
)
|
||||
|
||||
if *outfile == "" && cli.isTerminalOut {
|
||||
return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.")
|
||||
}
|
||||
if *outfile != "" {
|
||||
if output, err = os.Create(*outfile); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
sopts := &streamOpts{
|
||||
rawTerminal: true,
|
||||
out: output,
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
for _, arg := range cmd.Args() {
|
||||
v.Add("names", arg)
|
||||
}
|
||||
if _, err := cli.stream("GET", "/images/get?"+v.Encode(), sopts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
88
vendor/github.com/docker/docker/api/client/search.go
generated
vendored
@@ -1,88 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/pkg/stringutils"
|
||||
"github.com/docker/docker/registry"
|
||||
)
|
||||
|
||||
// ByStars sorts search results in ascending order by number of stars.
|
||||
type ByStars []registry.SearchResult
|
||||
|
||||
func (r ByStars) Len() int { return len(r) }
|
||||
func (r ByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r ByStars) Less(i, j int) bool { return r[i].StarCount < r[j].StarCount }
|
||||
|
||||
// CmdSearch searches the Docker Hub for images.
|
||||
//
|
||||
// Usage: docker search [OPTIONS] TERM
|
||||
func (cli *DockerCli) CmdSearch(args ...string) error {
|
||||
cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true)
|
||||
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
|
||||
trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds")
|
||||
automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
|
||||
stars := cmd.Uint([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least x stars")
|
||||
cmd.Require(flag.Exact, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
name := cmd.Arg(0)
|
||||
v := url.Values{}
|
||||
v.Set("term", name)
|
||||
|
||||
// Resolve the Repository name from fqn to hostname + name
|
||||
taglessRemote, _ := parsers.ParseRepositoryTag(name)
|
||||
|
||||
indexInfo, err := registry.ParseIndexInfo(taglessRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rdr, _, err := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, indexInfo, "search")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer rdr.Close()
|
||||
|
||||
results := ByStars{}
|
||||
if err := json.NewDecoder(rdr).Decode(&results); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(results))
|
||||
|
||||
w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
|
||||
fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n")
|
||||
for _, res := range results {
|
||||
if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) || (*trusted && !res.IsTrusted) {
|
||||
continue
|
||||
}
|
||||
desc := strings.Replace(res.Description, "\n", " ", -1)
|
||||
desc = strings.Replace(desc, "\r", " ", -1)
|
||||
if !*noTrunc && len(desc) > 45 {
|
||||
desc = stringutils.Truncate(desc, 42) + "..."
|
||||
}
|
||||
fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount)
|
||||
if res.IsOfficial {
|
||||
fmt.Fprint(w, "[OK]")
|
||||
|
||||
}
|
||||
fmt.Fprint(w, "\t")
|
||||
if res.IsAutomated || res.IsTrusted {
|
||||
fmt.Fprint(w, "[OK]")
|
||||
}
|
||||
fmt.Fprint(w, "\n")
|
||||
}
|
||||
w.Flush()
|
||||
return nil
|
||||
}
|
171
vendor/github.com/docker/docker/api/client/start.go
generated
vendored
@@ -1,171 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/promise"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
)
|
||||
|
||||
func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
|
||||
sigc := make(chan os.Signal, 128)
|
||||
signal.CatchAll(sigc)
|
||||
go func() {
|
||||
for s := range sigc {
|
||||
if s == signal.SIGCHLD {
|
||||
continue
|
||||
}
|
||||
var sig string
|
||||
for sigStr, sigN := range signal.SignalMap {
|
||||
if sigN == s {
|
||||
sig = sigStr
|
||||
break
|
||||
}
|
||||
}
|
||||
if sig == "" {
|
||||
fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s)
|
||||
continue
|
||||
}
|
||||
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, nil)); err != nil {
|
||||
logrus.Debugf("Error sending signal: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return sigc
|
||||
}
|
||||
|
||||
// CmdStart starts one or more containers.
|
||||
//
|
||||
// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...]
|
||||
func (cli *DockerCli) CmdStart(args ...string) error {
|
||||
cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["start"].Description, true)
|
||||
attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals")
|
||||
openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
cErr chan error
|
||||
tty bool
|
||||
)
|
||||
|
||||
if *attach || *openStdin {
|
||||
if cmd.NArg() > 1 {
|
||||
return fmt.Errorf("You cannot start and attach multiple containers at once.")
|
||||
}
|
||||
|
||||
serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var c types.ContainerJSON
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tty = c.Config.Tty
|
||||
|
||||
if !tty {
|
||||
sigc := cli.forwardAllSignals(cmd.Arg(0))
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
||||
var in io.ReadCloser
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("stream", "1")
|
||||
|
||||
if *openStdin && c.Config.OpenStdin {
|
||||
v.Set("stdin", "1")
|
||||
in = cli.in
|
||||
}
|
||||
|
||||
v.Set("stdout", "1")
|
||||
v.Set("stderr", "1")
|
||||
|
||||
hijacked := make(chan io.Closer)
|
||||
// Block the return until the chan gets closed
|
||||
defer func() {
|
||||
logrus.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
|
||||
if _, ok := <-hijacked; ok {
|
||||
fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
|
||||
}
|
||||
cli.in.Close()
|
||||
}()
|
||||
cErr = promise.Go(func() error {
|
||||
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil)
|
||||
})
|
||||
|
||||
// Acknowledge the hijack before starting
|
||||
select {
|
||||
case closer := <-hijacked:
|
||||
// Make sure that the hijack gets closed when returning (results
|
||||
// in closing the hijack chan and freeing server's goroutines)
|
||||
if closer != nil {
|
||||
defer closer.Close()
|
||||
}
|
||||
case err := <-cErr:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var encounteredError error
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, nil))
|
||||
if err != nil {
|
||||
if !*attach && !*openStdin {
|
||||
// attach and openStdin being false means we could be starting multiple containers,
// so when a container fails to start, show the error message and move on to the next
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
errNames = append(errNames, name)
|
||||
} else {
|
||||
encounteredError = err
|
||||
}
|
||||
} else {
|
||||
if !*attach && !*openStdin {
|
||||
fmt.Fprintf(cli.out, "%s\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(errNames) > 0 {
|
||||
encounteredError = fmt.Errorf("Error: failed to start containers: %v", errNames)
|
||||
}
|
||||
if encounteredError != nil {
|
||||
return encounteredError
|
||||
}
|
||||
|
||||
if *openStdin || *attach {
|
||||
if tty && cli.isTerminalOut {
|
||||
if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
|
||||
fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
|
||||
}
|
||||
}
|
||||
if attchErr := <-cErr; attchErr != nil {
|
||||
return attchErr
|
||||
}
|
||||
_, status, err := getExitCode(cli, cmd.Arg(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if status != 0 {
|
||||
return Cli.StatusError{StatusCode: status}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
351
vendor/github.com/docker/docker/api/client/stats.go
generated
vendored
@@ -1,351 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/pkg/units"
|
||||
)
|
||||
|
||||
type containerStats struct {
|
||||
Name string
|
||||
CPUPercentage float64
|
||||
Memory float64
|
||||
MemoryLimit float64
|
||||
MemoryPercentage float64
|
||||
NetworkRx float64
|
||||
NetworkTx float64
|
||||
BlockRead float64
|
||||
BlockWrite float64
|
||||
mu sync.RWMutex
|
||||
err error
|
||||
}
|
||||
|
||||
type stats struct {
|
||||
mu sync.Mutex
|
||||
cs []*containerStats
|
||||
}
|
||||
|
||||
func (s *containerStats) Collect(cli *DockerCli, streamStats bool) {
|
||||
v := url.Values{}
|
||||
if streamStats {
|
||||
v.Set("stream", "1")
|
||||
} else {
|
||||
v.Set("stream", "0")
|
||||
}
|
||||
serverResp, err := cli.call("GET", "/containers/"+s.Name+"/stats?"+v.Encode(), nil, nil)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.err = err
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var (
|
||||
previousCPU uint64
|
||||
previousSystem uint64
|
||||
dec = json.NewDecoder(serverResp.body)
|
||||
u = make(chan error, 1)
|
||||
)
|
||||
go func() {
|
||||
for {
|
||||
var v *types.StatsJSON
|
||||
if err := dec.Decode(&v); err != nil {
|
||||
u <- err
|
||||
return
|
||||
}
|
||||
|
||||
var memPercent = 0.0
|
||||
var cpuPercent = 0.0
|
||||
|
||||
// MemoryStats.Limit will never be 0 unless the container is not running and we haven't
// got any data from the cgroup
|
||||
if v.MemoryStats.Limit != 0 {
|
||||
memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
|
||||
}
|
||||
|
||||
previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
|
||||
previousSystem = v.PreCPUStats.SystemUsage
|
||||
cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
|
||||
blkRead, blkWrite := calculateBlockIO(v.BlkioStats)
|
||||
s.mu.Lock()
|
||||
s.CPUPercentage = cpuPercent
|
||||
s.Memory = float64(v.MemoryStats.Usage)
|
||||
s.MemoryLimit = float64(v.MemoryStats.Limit)
|
||||
s.MemoryPercentage = memPercent
|
||||
s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks)
|
||||
s.BlockRead = float64(blkRead)
|
||||
s.BlockWrite = float64(blkWrite)
|
||||
s.mu.Unlock()
|
||||
u <- nil
|
||||
if !streamStats {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-time.After(2 * time.Second):
|
||||
// zero out the values if we have not received an update within
|
||||
// the specified duration.
|
||||
s.mu.Lock()
|
||||
s.CPUPercentage = 0
|
||||
s.Memory = 0
|
||||
s.MemoryPercentage = 0
|
||||
s.MemoryLimit = 0
|
||||
s.NetworkRx = 0
|
||||
s.NetworkTx = 0
|
||||
s.BlockRead = 0
|
||||
s.BlockWrite = 0
|
||||
s.mu.Unlock()
|
||||
case err := <-u:
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.err = err
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
if !streamStats {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *containerStats) Display(w io.Writer) error {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if s.err != nil {
|
||||
return s.err
|
||||
}
|
||||
fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\n",
|
||||
s.Name,
|
||||
s.CPUPercentage,
|
||||
units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit),
|
||||
s.MemoryPercentage,
|
||||
units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx),
|
||||
units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite))
|
||||
return nil
|
||||
}
|
||||
|
||||
// CmdStats displays a live stream of resource usage statistics for one or more containers.
|
||||
//
|
||||
// This shows real-time information on CPU usage, memory usage, and network I/O.
|
||||
//
|
||||
// Usage: docker stats [OPTIONS] [CONTAINER...]
|
||||
func (cli *DockerCli) CmdStats(args ...string) error {
|
||||
cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true)
|
||||
all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
|
||||
noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result")
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
names := cmd.Args()
|
||||
showAll := len(names) == 0
|
||||
|
||||
if showAll {
|
||||
v := url.Values{}
|
||||
if *all {
|
||||
v.Set("all", "1")
|
||||
}
|
||||
body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var cs []types.Container
|
||||
if err := json.Unmarshal(body, &cs); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range cs {
|
||||
names = append(names, c.ID[:12])
|
||||
}
|
||||
}
|
||||
if len(names) == 0 && !showAll {
|
||||
return fmt.Errorf("No containers found")
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
var (
|
||||
cStats = stats{}
|
||||
w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
|
||||
)
|
||||
printHeader := func() {
|
||||
if !*noStream {
|
||||
fmt.Fprint(cli.out, "\033[2J")
|
||||
fmt.Fprint(cli.out, "\033[H")
|
||||
}
|
||||
io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\n")
|
||||
}
|
||||
for _, n := range names {
|
||||
s := &containerStats{Name: n}
|
||||
// no need to lock here since only the main goroutine is running here
|
||||
cStats.cs = append(cStats.cs, s)
|
||||
go s.Collect(cli, !*noStream)
|
||||
}
|
||||
closeChan := make(chan error)
|
||||
if showAll {
|
||||
type watch struct {
|
||||
cid string
|
||||
event string
|
||||
err error
|
||||
}
|
||||
getNewContainers := func(c chan<- watch) {
|
||||
res, err := cli.call("GET", "/events", nil, nil)
|
||||
if err != nil {
|
||||
c <- watch{err: err}
|
||||
return
|
||||
}
|
||||
defer res.body.Close()
|
||||
|
||||
dec := json.NewDecoder(res.body)
|
||||
for {
|
||||
var j *jsonmessage.JSONMessage
|
||||
if err := dec.Decode(&j); err != nil {
|
||||
c <- watch{err: err}
|
||||
return
|
||||
}
|
||||
c <- watch{j.ID[:12], j.Status, nil}
|
||||
}
|
||||
}
|
||||
go func(stopChan chan<- error) {
|
||||
cChan := make(chan watch)
|
||||
go getNewContainers(cChan)
|
||||
for {
|
||||
c := <-cChan
|
||||
if c.err != nil {
|
||||
stopChan <- c.err
|
||||
return
|
||||
}
|
||||
switch c.event {
|
||||
case "create":
|
||||
s := &containerStats{Name: c.cid}
|
||||
cStats.mu.Lock()
|
||||
cStats.cs = append(cStats.cs, s)
|
||||
cStats.mu.Unlock()
|
||||
go s.Collect(cli, !*noStream)
|
||||
case "stop":
|
||||
case "die":
|
||||
if !*all {
|
||||
var remove int
|
||||
// cStats can't be a map (which would give O(1) removal) because ranging over a map
// would make containers move up and down in the displayed list...:(
|
||||
cStats.mu.Lock()
|
||||
for i, s := range cStats.cs {
|
||||
if s.Name == c.cid {
|
||||
remove = i
|
||||
break
|
||||
}
|
||||
}
|
||||
cStats.cs = append(cStats.cs[:remove], cStats.cs[remove+1:]...)
|
||||
cStats.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}(closeChan)
|
||||
} else {
|
||||
close(closeChan)
|
||||
}
|
||||
// do a quick pause so that any failed connections for containers that do not exist are able to be
|
||||
// evicted before we display the initial or default values.
|
||||
time.Sleep(1500 * time.Millisecond)
|
||||
var errs []string
|
||||
cStats.mu.Lock()
|
||||
for _, c := range cStats.cs {
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
cStats.mu.Unlock()
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("%s", strings.Join(errs, ", "))
|
||||
}
|
||||
for range time.Tick(500 * time.Millisecond) {
|
||||
printHeader()
|
||||
toRemove := []int{}
|
||||
cStats.mu.Lock()
|
||||
for i, s := range cStats.cs {
|
||||
if err := s.Display(w); err != nil && !*noStream {
|
||||
toRemove = append(toRemove, i)
|
||||
}
|
||||
}
|
||||
for j := len(toRemove) - 1; j >= 0; j-- {
|
||||
i := toRemove[j]
|
||||
cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...)
|
||||
}
|
||||
if len(cStats.cs) == 0 && !showAll {
|
||||
return nil
|
||||
}
|
||||
cStats.mu.Unlock()
|
||||
w.Flush()
|
||||
if *noStream {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case err, ok := <-closeChan:
|
||||
if ok {
|
||||
if err != nil {
|
||||
// this is suppressing "unexpected EOF" in the cli when the
// daemon restarts so that it shuts down cleanly
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
// just skip
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
|
||||
var (
|
||||
cpuPercent = 0.0
|
||||
// calculate the change for the cpu usage of the container in between readings
|
||||
cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage - previousCPU)
|
||||
// calculate the change for the entire system between readings
|
||||
systemDelta = float64(v.CPUStats.SystemUsage - previousSystem)
|
||||
)
|
||||
|
||||
if systemDelta > 0.0 && cpuDelta > 0.0 {
|
||||
cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
|
||||
}
|
||||
return cpuPercent
|
||||
}
|
||||
|
||||
func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) {
|
||||
for _, bioEntry := range blkio.IoServiceBytesRecursive {
|
||||
switch strings.ToLower(bioEntry.Op) {
|
||||
case "read":
|
||||
blkRead = blkRead + bioEntry.Value
|
||||
case "write":
|
||||
blkWrite = blkWrite + bioEntry.Value
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) {
|
||||
var rx, tx float64
|
||||
|
||||
for _, v := range network {
|
||||
rx += float64(v.RxBytes)
|
||||
tx += float64(v.TxBytes)
|
||||
}
|
||||
return rx, tx
|
||||
}
|
46
vendor/github.com/docker/docker/api/client/stats_unit_test.go
generated
vendored
@@ -1,46 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
|
||||
func TestDisplay(t *testing.T) {
|
||||
c := &containerStats{
|
||||
Name: "app",
|
||||
CPUPercentage: 30.0,
|
||||
Memory: 100 * 1024 * 1024.0,
|
||||
MemoryLimit: 2048 * 1024 * 1024.0,
|
||||
MemoryPercentage: 100.0 / 2048.0 * 100.0,
|
||||
NetworkRx: 100 * 1024 * 1024,
|
||||
NetworkTx: 800 * 1024 * 1024,
|
||||
BlockRead: 100 * 1024 * 1024,
|
||||
BlockWrite: 800 * 1024 * 1024,
|
||||
mu: sync.RWMutex{},
|
||||
}
|
||||
var b bytes.Buffer
|
||||
if err := c.Display(&b); err != nil {
|
||||
t.Fatalf("c.Display() gave error: %s", err)
|
||||
}
|
||||
got := b.String()
|
||||
want := "app\t30.00%\t104.9 MB / 2.147 GB\t4.88%\t104.9 MB / 838.9 MB\t104.9 MB / 838.9 MB\n"
|
||||
if got != want {
|
||||
t.Fatalf("c.Display() = %q, want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCalculBlockIO(t *testing.T) {
|
||||
blkio := types.BlkioStats{
|
||||
IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}},
|
||||
}
|
||||
blkRead, blkWrite := calculateBlockIO(blkio)
|
||||
if blkRead != 5801 {
|
||||
t.Fatalf("blkRead = %d, want 5801", blkRead)
|
||||
}
|
||||
if blkWrite != 579 {
|
||||
t.Fatalf("blkWrite = %d, want 579", blkWrite)
|
||||
}
|
||||
}
|
41
vendor/github.com/docker/docker/api/client/stop.go
generated
vendored
@@ -1,41 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdStop stops one or more containers.
|
||||
//
|
||||
// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds).
|
||||
//
|
||||
// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
|
||||
func (cli *DockerCli) CmdStop(args ...string) error {
|
||||
cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["stop"].Description+".\nSending SIGTERM and then SIGKILL after a grace period", true)
|
||||
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it")
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("t", strconv.Itoa(*nSeconds))
|
||||
|
||||
var errNames []string
|
||||
for _, name := range cmd.Args() {
|
||||
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, nil))
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
errNames = append(errNames, name)
|
||||
} else {
|
||||
fmt.Fprintf(cli.out, "%s\n", name)
|
||||
}
|
||||
}
|
||||
if len(errNames) > 0 {
|
||||
return fmt.Errorf("Error: failed to stop containers: %v", errNames)
|
||||
}
|
||||
return nil
|
||||
}
|
42
vendor/github.com/docker/docker/api/client/tag.go
generated
vendored
@@ -1,42 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers"
|
||||
"github.com/docker/docker/registry"
|
||||
)
|
||||
|
||||
// CmdTag tags an image into a repository.
|
||||
//
|
||||
// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]
|
||||
func (cli *DockerCli) CmdTag(args ...string) error {
|
||||
cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true)
|
||||
force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
|
||||
cmd.Require(flag.Exact, 2)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var (
|
||||
repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
|
||||
v = url.Values{}
|
||||
)
|
||||
|
||||
//Check if the given image name can be resolved
|
||||
if err := registry.ValidateRepositoryName(repository); err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set("repo", repository)
|
||||
v.Set("tag", tag)
|
||||
|
||||
if *force {
|
||||
v.Set("force", "1")
|
||||
}
|
||||
|
||||
if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, nil)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
49
vendor/github.com/docker/docker/api/client/top.go
generated
vendored
@@ -1,49 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
)
|
||||
|
||||
// CmdTop displays the running processes of a container.
|
||||
//
|
||||
// Usage: docker top CONTAINER
|
||||
func (cli *DockerCli) CmdTop(args ...string) error {
|
||||
cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, Cli.DockerCommands["top"].Description, true)
|
||||
cmd.Require(flag.Min, 1)
|
||||
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
val := url.Values{}
|
||||
if cmd.NArg() > 1 {
|
||||
val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
|
||||
}
|
||||
|
||||
serverResp, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
procList := types.ContainerProcessList{}
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&procList); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
|
||||
fmt.Fprintln(w, strings.Join(procList.Titles, "\t"))
|
||||
|
||||
for _, proc := range procList.Processes {
|
||||
fmt.Fprintln(w, strings.Join(proc, "\t"))
|
||||
}
|
||||
w.Flush()
|
||||
return nil
|
||||
}
|
479
vendor/github.com/docker/docker/api/client/trust.go
generated
vendored
@@ -1,479 +0,0 @@
package client
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/docker/distribution/registry/client/auth"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/docker/docker/cliconfig"
|
||||
"github.com/docker/docker/pkg/ansiescape"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/tlsconfig"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/notary/client"
|
||||
"github.com/docker/notary/pkg/passphrase"
|
||||
"github.com/docker/notary/trustmanager"
|
||||
"github.com/endophage/gotuf/data"
|
||||
)
|
||||
|
||||
var untrusted bool
|
||||
|
||||
func addTrustedFlags(fs *flag.FlagSet, verify bool) {
|
||||
var trusted bool
|
||||
if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" {
|
||||
if t, err := strconv.ParseBool(e); t || err != nil {
|
||||
// treat any other value as true
|
||||
trusted = true
|
||||
}
|
||||
}
|
||||
message := "Skip image signing"
|
||||
if verify {
|
||||
message = "Skip image verification"
|
||||
}
|
||||
fs.BoolVar(&untrusted, []string{"-disable-content-trust"}, !trusted, message)
|
||||
}
|
||||
|
||||
func isTrusted() bool {
|
||||
return !untrusted
|
||||
}
|
||||
|
||||
var targetRegexp = regexp.MustCompile(`([\S]+): digest: ([\S]+) size: ([\d]+)`)
|
||||
|
||||
type target struct {
|
||||
reference registry.Reference
|
||||
digest digest.Digest
|
||||
size int64
|
||||
}
|
||||
|
||||
func (cli *DockerCli) trustDirectory() string {
|
||||
return filepath.Join(cliconfig.ConfigDir(), "trust")
|
||||
}
|
||||
|
||||
// certificateDirectory returns the directory containing
|
||||
// TLS certificates for the given server. An error is
|
||||
// returned if there was an error parsing the server string.
|
||||
func (cli *DockerCli) certificateDirectory(server string) (string, error) {
|
||||
u, err := url.Parse(server)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil
|
||||
}
|
||||
|
||||
func trustServer(index *registry.IndexInfo) (string, error) {
|
||||
if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" {
|
||||
urlObj, err := url.Parse(s)
|
||||
if err != nil || urlObj.Scheme != "https" {
|
||||
return "", fmt.Errorf("valid https URL required for trust server, got %s", s)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
if index.Official {
|
||||
return registry.NotaryServer, nil
|
||||
}
|
||||
return "https://" + index.Name, nil
|
||||
}
|
||||
|
||||
type simpleCredentialStore struct {
|
||||
auth cliconfig.AuthConfig
|
||||
}
|
||||
|
||||
func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) {
|
||||
return scs.auth.Username, scs.auth.Password
|
||||
}
|
||||
|
||||
func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig cliconfig.AuthConfig) (*client.NotaryRepository, error) {
|
||||
server, err := trustServer(repoInfo.Index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg = tlsconfig.ClientDefault
|
||||
cfg.InsecureSkipVerify = !repoInfo.Index.Secure
|
||||
|
||||
// Get certificate base directory
|
||||
certDir, err := cli.certificateDirectory(server)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugf("reading certificate directory: %s", certDir)
|
||||
|
||||
if err := registry.ReadCertsDirectory(&cfg, certDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
base := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).Dial,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
TLSClientConfig: &cfg,
|
||||
DisableKeepAlives: true,
|
||||
}
|
||||
|
||||
// Skip configuration headers since request is not going to Docker daemon
|
||||
modifiers := registry.DockerHeaders(http.Header{})
|
||||
authTransport := transport.NewTransport(base, modifiers...)
|
||||
pingClient := &http.Client{
|
||||
Transport: authTransport,
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
endpointStr := server + "/v2/"
|
||||
req, err := http.NewRequest("GET", endpointStr, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
challengeManager := auth.NewSimpleChallengeManager()
|
||||
|
||||
resp, err := pingClient.Do(req)
|
||||
if err != nil {
|
||||
// Ignore error on ping to operate in offline mode
|
||||
logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err)
|
||||
} else {
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Add response to the challenge manager to parse out
|
||||
// authentication header and register authentication method
|
||||
if err := challengeManager.AddResponse(resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
creds := simpleCredentialStore{auth: authConfig}
|
||||
tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.CanonicalName, "push", "pull")
|
||||
basicHandler := auth.NewBasicHandler(creds)
|
||||
modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)))
|
||||
tr := transport.NewTransport(base, modifiers...)
|
||||
|
||||
return client.NewNotaryRepository(cli.trustDirectory(), repoInfo.CanonicalName, server, tr, cli.getPassphraseRetriever())
|
||||
}
|
||||
|
||||
func convertTarget(t client.Target) (target, error) {
|
||||
h, ok := t.Hashes["sha256"]
|
||||
if !ok {
|
||||
return target{}, errors.New("no valid hash, expecting sha256")
|
||||
}
|
||||
return target{
|
||||
reference: registry.ParseReference(t.Name),
|
||||
digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)),
|
||||
size: t.Length,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever {
|
||||
aliasMap := map[string]string{
|
||||
"root": "root",
|
||||
"snapshot": "repository",
|
||||
"targets": "repository",
|
||||
}
|
||||
baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap)
|
||||
env := map[string]string{
|
||||
"root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
|
||||
"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
|
||||
"targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
|
||||
}
|
||||
|
||||
// Backwards compatibility with old env names. We should remove this in 1.10
|
||||
if env["root"] == "" {
|
||||
if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE"); passphrase != "" {
|
||||
env["root"] = passphrase
|
||||
fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE\n")
|
||||
}
|
||||
}
|
||||
if env["snapshot"] == "" || env["targets"] == "" {
|
||||
if passphrase := os.Getenv("DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE"); passphrase != "" {
|
||||
env["snapshot"] = passphrase
|
||||
env["targets"] = passphrase
|
||||
fmt.Fprintf(cli.err, "[DEPRECATED] The environment variable DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE has been deprecated and will be removed in v1.10. Please use DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE\n")
|
||||
}
|
||||
}
|
||||
|
||||
return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
|
||||
if v := env[alias]; v != "" {
|
||||
return v, numAttempts > 1, nil
|
||||
}
|
||||
return baseRetriever(keyName, alias, createNew, numAttempts)
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *DockerCli) trustedReference(repo string, ref registry.Reference) (registry.Reference, error) {
|
||||
repoInfo, err := registry.ParseRepositoryInfo(repo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Resolve the Auth config relevant for this server
|
||||
authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index)
|
||||
|
||||
notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t, err := notaryRepo.GetTargetByName(ref.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, err := convertTarget(*t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
}
|
||||
|
||||
return registry.DigestReference(r.digest), nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) tagTrusted(repoInfo *registry.RepositoryInfo, trustedRef, ref registry.Reference) error {
|
||||
fullName := trustedRef.ImageName(repoInfo.LocalName)
|
||||
fmt.Fprintf(cli.out, "Tagging %s as %s\n", fullName, ref.ImageName(repoInfo.LocalName))
|
||||
tv := url.Values{}
|
||||
tv.Set("repo", repoInfo.LocalName)
|
||||
tv.Set("tag", ref.String())
|
||||
tv.Set("force", "1")
|
||||
|
||||
if _, _, err := readBody(cli.call("POST", "/images/"+fullName+"/tag?"+tv.Encode(), nil, nil)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func notaryError(err error) error {
|
||||
switch err.(type) {
|
||||
case *json.SyntaxError:
|
||||
logrus.Debugf("Notary syntax error: %s", err)
|
||||
return errors.New("no trust data available for remote repository")
|
||||
case client.ErrExpired:
|
||||
return fmt.Errorf("remote repository out-of-date: %v", err)
|
||||
case trustmanager.ErrKeyNotFound:
|
||||
return fmt.Errorf("signing keys not found: %v", err)
|
||||
case *net.OpError:
|
||||
return fmt.Errorf("error contacting notary server: %v", err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (cli *DockerCli) trustedPull(repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig cliconfig.AuthConfig) error {
|
||||
var (
|
||||
v = url.Values{}
|
||||
refs = []target{}
|
||||
)
|
||||
|
||||
notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if ref.String() == "" {
|
||||
// List all targets
|
||||
targets, err := notaryRepo.ListTargets()
|
||||
if err != nil {
|
||||
return notaryError(err)
|
||||
}
|
||||
for _, tgt := range targets {
|
||||
t, err := convertTarget(*tgt)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.LocalName)
|
||||
continue
|
||||
}
|
||||
refs = append(refs, t)
|
||||
}
|
||||
} else {
|
||||
t, err := notaryRepo.GetTargetByName(ref.String())
|
||||
if err != nil {
|
||||
return notaryError(err)
|
||||
}
|
||||
r, err := convertTarget(*t)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
refs = append(refs, r)
|
||||
}
|
||||
|
||||
v.Set("fromImage", repoInfo.LocalName)
|
||||
for i, r := range refs {
|
||||
displayTag := r.reference.String()
|
||||
if displayTag != "" {
|
||||
displayTag = ":" + displayTag
|
||||
}
|
||||
fmt.Fprintf(cli.out, "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.LocalName, displayTag, r.digest)
|
||||
v.Set("tag", r.digest.String())
|
||||
|
||||
_, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If reference is not trusted, tag by trusted reference
|
||||
if !r.reference.HasDigest() {
|
||||
if err := cli.tagTrusted(repoInfo, registry.DigestReference(r.digest), r.reference); err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func selectKey(keys map[string]string) string {
|
||||
if len(keys) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
keyIDs := []string{}
|
||||
for k := range keys {
|
||||
keyIDs = append(keyIDs, k)
|
||||
}
|
||||
|
||||
// TODO(dmcgowan): let user choose if multiple keys, now pick consistently
|
||||
sort.Strings(keyIDs)
|
||||
|
||||
return keyIDs[0]
|
||||
}
|
||||
|
||||
func targetStream(in io.Writer) (io.WriteCloser, <-chan []target) {
|
||||
r, w := io.Pipe()
|
||||
out := io.MultiWriter(in, w)
|
||||
targetChan := make(chan []target)
|
||||
|
||||
go func() {
|
||||
targets := []target{}
|
||||
scanner := bufio.NewScanner(r)
|
||||
scanner.Split(ansiescape.ScanANSILines)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
if matches := targetRegexp.FindSubmatch(line); len(matches) == 4 {
|
||||
dgst, err := digest.ParseDigest(string(matches[2]))
|
||||
if err != nil {
|
||||
// Line does match what is expected, continue looking for valid lines
|
||||
logrus.Debugf("Bad digest value %q in matched line, ignoring\n", string(matches[2]))
|
||||
continue
|
||||
}
|
||||
s, err := strconv.ParseInt(string(matches[3]), 10, 64)
|
||||
if err != nil {
|
||||
// Line does match what is expected, continue looking for valid lines
|
||||
logrus.Debugf("Bad size value %q in matched line, ignoring\n", string(matches[3]))
|
||||
continue
|
||||
}
|
||||
|
||||
targets = append(targets, target{
|
||||
reference: registry.ParseReference(string(matches[1])),
|
||||
digest: dgst,
|
||||
size: s,
|
||||
})
|
||||
}
|
||||
}
|
||||
targetChan <- targets
|
||||
}()
|
||||
|
||||
return ioutils.NewWriteCloserWrapper(out, w.Close), targetChan
|
||||
}
|
||||
|
||||
func (cli *DockerCli) trustedPush(repoInfo *registry.RepositoryInfo, tag string, authConfig cliconfig.AuthConfig) error {
|
||||
streamOut, targetChan := targetStream(cli.out)
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("tag", tag)
|
||||
|
||||
_, _, err := cli.clientRequestAttemptLogin("POST", "/images/"+repoInfo.LocalName+"/push?"+v.Encode(), nil, streamOut, repoInfo.Index, "push")
|
||||
// Close stream channel to finish target parsing
|
||||
if err := streamOut.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
// Check error from request
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get target results
|
||||
targets := <-targetChan
|
||||
|
||||
if tag == "" {
|
||||
fmt.Fprintf(cli.out, "No tag specified, skipping trust metadata push\n")
|
||||
return nil
|
||||
}
|
||||
if len(targets) == 0 {
|
||||
fmt.Fprintf(cli.out, "No targets found, skipping trust metadata push\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Fprintf(cli.out, "Signing and pushing trust metadata\n")
|
||||
|
||||
repo, err := cli.getNotaryRepository(repoInfo, authConfig)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, target := range targets {
|
||||
h, err := hex.DecodeString(target.digest.Hex())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t := &client.Target{
|
||||
Name: target.reference.String(),
|
||||
Hashes: data.Hashes{
|
||||
string(target.digest.Algorithm()): h,
|
||||
},
|
||||
Length: int64(target.size),
|
||||
}
|
||||
if err := repo.AddTarget(t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = repo.Publish()
|
||||
if _, ok := err.(*client.ErrRepoNotInitialized); !ok {
|
||||
return notaryError(err)
|
||||
}
|
||||
|
||||
ks := repo.KeyStoreManager
|
||||
keys := ks.RootKeyStore().ListKeys()
|
||||
|
||||
rootKey := selectKey(keys)
|
||||
if rootKey == "" {
|
||||
rootKey, err = ks.GenRootKey("ecdsa")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
cryptoService, err := ks.GetRootCryptoService(rootKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := repo.Initialize(cryptoService); err != nil {
|
||||
return notaryError(err)
|
||||
}
|
||||
fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.CanonicalName)
|
||||
|
||||
return notaryError(repo.Publish())
|
||||
}
|
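addTrustedFlags above enables content trust for any value of DOCKER_CONTENT_TRUST that does not parse as false. A standalone sketch of that rule, assuming only the standard library; the helper name contentTrustEnabled is ours, not part of the vendored file:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// contentTrustEnabled mirrors the env handling in addTrustedFlags:
// any value that is not a parseable "false" (e.g. "1", "true", "yes") enables trust.
func contentTrustEnabled() bool {
	e := os.Getenv("DOCKER_CONTENT_TRUST")
	if e == "" {
		return false
	}
	t, err := strconv.ParseBool(e)
	return t || err != nil
}

func main() {
	os.Setenv("DOCKER_CONTENT_TRUST", "yes") // example value
	fmt.Println(contentTrustEnabled())       // true
}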
55 vendor/github.com/docker/docker/api/client/trust_test.go generated vendored
@@ -1,55 +0,0 @@
package client

import (
	"os"
	"testing"

	"github.com/docker/docker/registry"
)

func unsetENV() {
	os.Unsetenv("DOCKER_CONTENT_TRUST")
	os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER")
}

func TestENVTrustServer(t *testing.T) {
	defer unsetENV()
	indexInfo := &registry.IndexInfo{Name: "testserver"}
	if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil {
		t.Fatal("Failed to set ENV variable")
	}
	output, err := trustServer(indexInfo)
	expectedStr := "https://notary-test.com:5000"
	if err != nil || output != expectedStr {
		t.Fatalf("Expected server to be %s, got %s", expectedStr, output)
	}
}

func TestHTTPENVTrustServer(t *testing.T) {
	defer unsetENV()
	indexInfo := &registry.IndexInfo{Name: "testserver"}
	if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil {
		t.Fatal("Failed to set ENV variable")
	}
	_, err := trustServer(indexInfo)
	if err == nil {
		t.Fatal("Expected error with invalid scheme")
	}
}

func TestOfficialTrustServer(t *testing.T) {
	indexInfo := &registry.IndexInfo{Name: "testserver", Official: true}
	output, err := trustServer(indexInfo)
	if err != nil || output != registry.NotaryServer {
		t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output)
	}
}

func TestNonOfficialTrustServer(t *testing.T) {
	indexInfo := &registry.IndexInfo{Name: "testserver", Official: false}
	output, err := trustServer(indexInfo)
	expectedStr := "https://" + indexInfo.Name
	if err != nil || output != expectedStr {
		t.Fatalf("Expected server to be %s, got %s", expectedStr, output)
	}
}

32 vendor/github.com/docker/docker/api/client/unpause.go generated vendored
@@ -1,32 +0,0 @@
package client

import (
	"fmt"

	Cli "github.com/docker/docker/cli"
	flag "github.com/docker/docker/pkg/mflag"
)

// CmdUnpause unpauses all processes within a container, for one or more containers.
//
// Usage: docker unpause CONTAINER [CONTAINER...]
func (cli *DockerCli) CmdUnpause(args ...string) error {
	cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["unpause"].Description, true)
	cmd.Require(flag.Min, 1)

	cmd.ParseFlags(args, true)

	var errNames []string
	for _, name := range cmd.Args() {
		if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, nil)); err != nil {
			fmt.Fprintf(cli.err, "%s\n", err)
			errNames = append(errNames, name)
		} else {
			fmt.Fprintf(cli.out, "%s\n", name)
		}
	}
	if len(errNames) > 0 {
		return fmt.Errorf("Error: failed to unpause containers: %v", errNames)
	}
	return nil
}

384 vendor/github.com/docker/docker/api/client/utils.go generated vendored
@@ -1,384 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
gosignal "os/signal"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/cliconfig"
|
||||
"github.com/docker/docker/dockerversion"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/docker/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
errConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
|
||||
)
|
||||
|
||||
type serverResponse struct {
|
||||
body io.ReadCloser
|
||||
header http.Header
|
||||
statusCode int
|
||||
}
|
||||
|
||||
// HTTPClient creates a new HTTP client with the cli's client transport instance.
|
||||
func (cli *DockerCli) HTTPClient() *http.Client {
|
||||
return &http.Client{Transport: cli.transport}
|
||||
}
|
||||
|
||||
func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) {
|
||||
params := bytes.NewBuffer(nil)
|
||||
if data != nil {
|
||||
if err := json.NewEncoder(params).Encode(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers map[string][]string) (*serverResponse, error) {
|
||||
|
||||
serverResp := &serverResponse{
|
||||
body: nil,
|
||||
statusCode: -1,
|
||||
}
|
||||
|
||||
expectedPayload := (method == "POST" || method == "PUT")
|
||||
if expectedPayload && in == nil {
|
||||
in = bytes.NewReader([]byte{})
|
||||
}
|
||||
req, err := http.NewRequest(method, fmt.Sprintf("%s/v%s%s", cli.basePath, api.Version, path), in)
|
||||
if err != nil {
|
||||
return serverResp, err
|
||||
}
|
||||
|
||||
// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
|
||||
// then the user can't change OUR headers
|
||||
for k, v := range cli.configFile.HTTPHeaders {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
|
||||
req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")")
|
||||
req.URL.Host = cli.addr
|
||||
req.URL.Scheme = cli.scheme
|
||||
|
||||
if headers != nil {
|
||||
for k, v := range headers {
|
||||
req.Header[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if expectedPayload && req.Header.Get("Content-Type") == "" {
|
||||
req.Header.Set("Content-Type", "text/plain")
|
||||
}
|
||||
|
||||
resp, err := cli.HTTPClient().Do(req)
|
||||
if resp != nil {
|
||||
serverResp.statusCode = resp.StatusCode
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if utils.IsTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
|
||||
return serverResp, errConnectionFailed
|
||||
}
|
||||
|
||||
if cli.tlsConfig == nil && strings.Contains(err.Error(), "malformed HTTP response") {
|
||||
return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
|
||||
}
|
||||
if cli.tlsConfig != nil && strings.Contains(err.Error(), "remote error: bad certificate") {
|
||||
return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
|
||||
}
|
||||
|
||||
return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err)
|
||||
}
|
||||
|
||||
if serverResp.statusCode < 200 || serverResp.statusCode >= 400 {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return serverResp, err
|
||||
}
|
||||
if len(body) == 0 {
|
||||
return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL)
|
||||
}
|
||||
return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body))
|
||||
}
|
||||
|
||||
serverResp.body = resp.Body
|
||||
serverResp.header = resp.Header
|
||||
return serverResp, nil
|
||||
}
|
||||
|
||||
// cmdAttempt builds the corresponding registry Auth Header from the given
|
||||
// authConfig. It returns the servers body, status, error response
|
||||
func (cli *DockerCli) cmdAttempt(authConfig cliconfig.AuthConfig, method, path string, in io.Reader, out io.Writer) (io.ReadCloser, int, error) {
|
||||
buf, err := json.Marshal(authConfig)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
registryAuthHeader := []string{
|
||||
base64.URLEncoding.EncodeToString(buf),
|
||||
}
|
||||
|
||||
// begin the request
|
||||
serverResp, err := cli.clientRequest(method, path, in, map[string][]string{
|
||||
"X-Registry-Auth": registryAuthHeader,
|
||||
})
|
||||
if err == nil && out != nil {
|
||||
// If we are streaming output, complete the stream since
|
||||
// errors may not appear until later.
|
||||
err = cli.streamBody(serverResp.body, serverResp.header.Get("Content-Type"), true, out, nil)
|
||||
}
|
||||
if err != nil {
|
||||
// Since errors in a stream appear after status 200 has been written,
|
||||
// we may need to change the status code.
|
||||
if strings.Contains(err.Error(), "Authentication is required") ||
|
||||
strings.Contains(err.Error(), "Status 401") ||
|
||||
strings.Contains(err.Error(), "401 Unauthorized") ||
|
||||
strings.Contains(err.Error(), "status code 401") {
|
||||
serverResp.statusCode = http.StatusUnauthorized
|
||||
}
|
||||
}
|
||||
return serverResp.body, serverResp.statusCode, err
|
||||
}
|
||||
|
||||
func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) {
|
||||
|
||||
// Resolve the Auth config relevant for this server
|
||||
authConfig := registry.ResolveAuthConfig(cli.configFile, index)
|
||||
body, statusCode, err := cli.cmdAttempt(authConfig, method, path, in, out)
|
||||
if statusCode == http.StatusUnauthorized {
|
||||
fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName)
|
||||
if err = cli.CmdLogin(index.GetAuthConfigKey()); err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
authConfig = registry.ResolveAuthConfig(cli.configFile, index)
|
||||
return cli.cmdAttempt(authConfig, method, path, in, out)
|
||||
}
|
||||
return body, statusCode, err
|
||||
}
|
||||
|
||||
func (cli *DockerCli) callWrapper(method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, http.Header, int, error) {
|
||||
sr, err := cli.call(method, path, data, headers)
|
||||
return sr.body, sr.header, sr.statusCode, err
|
||||
}
|
||||
|
||||
func (cli *DockerCli) call(method, path string, data interface{}, headers map[string][]string) (*serverResponse, error) {
|
||||
params, err := cli.encodeData(data)
|
||||
if err != nil {
|
||||
sr := &serverResponse{
|
||||
body: nil,
|
||||
header: nil,
|
||||
statusCode: -1,
|
||||
}
|
||||
return sr, nil
|
||||
}
|
||||
|
||||
if data != nil {
|
||||
if headers == nil {
|
||||
headers = make(map[string][]string)
|
||||
}
|
||||
headers["Content-Type"] = []string{"application/json"}
|
||||
}
|
||||
|
||||
serverResp, err := cli.clientRequest(method, path, params, headers)
|
||||
return serverResp, err
|
||||
}
|
||||
|
||||
type streamOpts struct {
|
||||
rawTerminal bool
|
||||
in io.Reader
|
||||
out io.Writer
|
||||
err io.Writer
|
||||
headers map[string][]string
|
||||
}
|
||||
|
||||
func (cli *DockerCli) stream(method, path string, opts *streamOpts) (*serverResponse, error) {
|
||||
serverResp, err := cli.clientRequest(method, path, opts.in, opts.headers)
|
||||
if err != nil {
|
||||
return serverResp, err
|
||||
}
|
||||
return serverResp, cli.streamBody(serverResp.body, serverResp.header.Get("Content-Type"), opts.rawTerminal, opts.out, opts.err)
|
||||
}
|
||||
|
||||
func (cli *DockerCli) streamBody(body io.ReadCloser, contentType string, rawTerminal bool, stdout, stderr io.Writer) error {
|
||||
defer body.Close()
|
||||
|
||||
if api.MatchesContentType(contentType, "application/json") {
|
||||
return jsonmessage.DisplayJSONMessagesStream(body, stdout, cli.outFd, cli.isTerminalOut)
|
||||
}
|
||||
if stdout != nil || stderr != nil {
|
||||
// When TTY is ON, use regular copy
|
||||
var err error
|
||||
if rawTerminal {
|
||||
_, err = io.Copy(stdout, body)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(stdout, stderr, body)
|
||||
}
|
||||
logrus.Debugf("[stream] End of stdout")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) resizeTty(id string, isExec bool) {
|
||||
height, width := cli.getTtySize()
|
||||
if height == 0 && width == 0 {
|
||||
return
|
||||
}
|
||||
v := url.Values{}
|
||||
v.Set("h", strconv.Itoa(height))
|
||||
v.Set("w", strconv.Itoa(width))
|
||||
|
||||
path := ""
|
||||
if !isExec {
|
||||
path = "/containers/" + id + "/resize?"
|
||||
} else {
|
||||
path = "/exec/" + id + "/resize?"
|
||||
}
|
||||
|
||||
if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, nil)); err != nil {
|
||||
logrus.Debugf("Error resize: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForExit(cli *DockerCli, containerID string) (int, error) {
|
||||
serverResp, err := cli.call("POST", "/containers/"+containerID+"/wait", nil, nil)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var res types.ContainerWaitResponse
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&res); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return res.StatusCode, nil
|
||||
}
|
||||
|
||||
// getExitCode perform an inspect on the container. It returns
|
||||
// the running state and the exit code.
|
||||
func getExitCode(cli *DockerCli, containerID string) (bool, int, error) {
|
||||
serverResp, err := cli.call("GET", "/containers/"+containerID+"/json", nil, nil)
|
||||
if err != nil {
|
||||
// If we can't connect, then the daemon probably died.
|
||||
if err != errConnectionFailed {
|
||||
return false, -1, err
|
||||
}
|
||||
return false, -1, nil
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
var c types.ContainerJSON
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil {
|
||||
return false, -1, err
|
||||
}
|
||||
|
||||
return c.State.Running, c.State.ExitCode, nil
|
||||
}
|
||||
|
||||
// getExecExitCode perform an inspect on the exec command. It returns
|
||||
// the running state and the exit code.
|
||||
func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) {
|
||||
serverResp, err := cli.call("GET", "/exec/"+execID+"/json", nil, nil)
|
||||
if err != nil {
|
||||
// If we can't connect, then the daemon probably died.
|
||||
if err != errConnectionFailed {
|
||||
return false, -1, err
|
||||
}
|
||||
return false, -1, nil
|
||||
}
|
||||
|
||||
defer serverResp.body.Close()
|
||||
|
||||
//TODO: Should we reconsider having a type in api/types?
|
||||
//this is a response to exex/id/json not container
|
||||
var c struct {
|
||||
Running bool
|
||||
ExitCode int
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(serverResp.body).Decode(&c); err != nil {
|
||||
return false, -1, err
|
||||
}
|
||||
|
||||
return c.Running, c.ExitCode, nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) monitorTtySize(id string, isExec bool) error {
|
||||
cli.resizeTty(id, isExec)
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
go func() {
|
||||
prevH, prevW := cli.getTtySize()
|
||||
for {
|
||||
time.Sleep(time.Millisecond * 250)
|
||||
h, w := cli.getTtySize()
|
||||
|
||||
if prevW != w || prevH != h {
|
||||
cli.resizeTty(id, isExec)
|
||||
}
|
||||
prevH = h
|
||||
prevW = w
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
gosignal.Notify(sigchan, signal.SIGWINCH)
|
||||
go func() {
|
||||
for range sigchan {
|
||||
cli.resizeTty(id, isExec)
|
||||
}
|
||||
}()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli *DockerCli) getTtySize() (int, int) {
|
||||
if !cli.isTerminalOut {
|
||||
return 0, 0
|
||||
}
|
||||
ws, err := term.GetWinsize(cli.outFd)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error getting size: %s", err)
|
||||
if ws == nil {
|
||||
return 0, 0
|
||||
}
|
||||
}
|
||||
return int(ws.Height), int(ws.Width)
|
||||
}
|
||||
|
||||
func readBody(serverResp *serverResponse, err error) ([]byte, int, error) {
|
||||
if serverResp.body != nil {
|
||||
defer serverResp.body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, serverResp.statusCode, err
|
||||
}
|
||||
body, err := ioutil.ReadAll(serverResp.body)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
return body, serverResp.statusCode, nil
|
||||
}
|
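monitorTtySize above re-sends the terminal size to the daemon whenever the process receives SIGWINCH (on non-Windows platforms). The same pattern in isolation; the resize closure is a stand-in for cli.resizeTty, and the program only illustrates the signal wiring:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// resize is a stand-in for cli.resizeTty(id, isExec).
	resize := func() { fmt.Println("window changed, resizing TTY") }

	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGWINCH)
	go func() {
		for range sigchan {
			resize()
		}
	}()

	// Block forever in this sketch; a real client would do its work here.
	select {}
}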
96 vendor/github.com/docker/docker/api/client/version.go generated vendored
@@ -1,96 +0,0 @@
package client

import (
	"encoding/json"
	"runtime"
	"text/template"

	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types"
	Cli "github.com/docker/docker/cli"
	"github.com/docker/docker/dockerversion"
	flag "github.com/docker/docker/pkg/mflag"
	"github.com/docker/docker/utils"
)

var versionTemplate = `Client:
 Version:      {{.Client.Version}}
 API version:  {{.Client.APIVersion}}
 Go version:   {{.Client.GoVersion}}
 Git commit:   {{.Client.GitCommit}}
 Built:        {{.Client.BuildTime}}
 OS/Arch:      {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}}
 Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}}

Server:
 Version:      {{.Server.Version}}
 API version:  {{.Server.APIVersion}}
 Go version:   {{.Server.GoVersion}}
 Git commit:   {{.Server.GitCommit}}
 Built:        {{.Server.BuildTime}}
 OS/Arch:      {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}}
 Experimental: {{.Server.Experimental}}{{end}}{{end}}`

type versionData struct {
	Client   types.Version
	ServerOK bool
	Server   types.Version
}

// CmdVersion shows Docker version information.
//
// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch.
//
// Usage: docker version
func (cli *DockerCli) CmdVersion(args ...string) (err error) {
	cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true)
	tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template")
	cmd.Require(flag.Exact, 0)

	cmd.ParseFlags(args, true)
	if *tmplStr == "" {
		*tmplStr = versionTemplate
	}

	var tmpl *template.Template
	if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil {
		return Cli.StatusError{StatusCode: 64,
			Status: "Template parsing error: " + err.Error()}
	}

	vd := versionData{
		Client: types.Version{
			Version:      dockerversion.Version,
			APIVersion:   api.Version,
			GoVersion:    runtime.Version(),
			GitCommit:    dockerversion.GitCommit,
			BuildTime:    dockerversion.BuildTime,
			Os:           runtime.GOOS,
			Arch:         runtime.GOARCH,
			Experimental: utils.ExperimentalBuild(),
		},
	}

	defer func() {
		if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil {
			err = err2
		}
		cli.out.Write([]byte{'\n'})
	}()

	serverResp, err := cli.call("GET", "/version", nil, nil)
	if err != nil {
		return err
	}

	defer serverResp.body.Close()

	if err = json.NewDecoder(serverResp.body).Decode(&vd.Server); err != nil {
		return Cli.StatusError{StatusCode: 1,
			Status: "Error reading remote version: " + err.Error()}
	}

	vd.ServerOK = true

	return
}

234 vendor/github.com/docker/docker/api/client/volume.go generated vendored
@@ -1,234 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
Cli "github.com/docker/docker/cli"
|
||||
"github.com/docker/docker/opts"
|
||||
flag "github.com/docker/docker/pkg/mflag"
|
||||
"github.com/docker/docker/pkg/parsers/filters"
|
||||
)
|
||||
|
||||
// CmdVolume is the parent subcommand for all volume commands
|
||||
//
|
||||
// Usage: docker volume <COMMAND> <OPTS>
|
||||
func (cli *DockerCli) CmdVolume(args ...string) error {
|
||||
description := Cli.DockerCommands["volume"].Description + "\n\nCommands:\n"
|
||||
commands := [][]string{
|
||||
{"create", "Create a volume"},
|
||||
{"inspect", "Return low-level information on a volume"},
|
||||
{"ls", "List volumes"},
|
||||
{"rm", "Remove a volume"},
|
||||
}
|
||||
|
||||
for _, cmd := range commands {
|
||||
description += fmt.Sprintf(" %-25.25s%s\n", cmd[0], cmd[1])
|
||||
}
|
||||
|
||||
description += "\nRun 'docker volume COMMAND --help' for more information on a command"
|
||||
cmd := Cli.Subcmd("volume", []string{"[COMMAND]"}, description, false)
|
||||
|
||||
cmd.Require(flag.Exact, 0)
|
||||
err := cmd.ParseFlags(args, true)
|
||||
cmd.Usage()
|
||||
return err
|
||||
}
|
||||
|
||||
// CmdVolumeLs outputs a list of Docker volumes.
|
||||
//
|
||||
// Usage: docker volume ls [OPTIONS]
|
||||
func (cli *DockerCli) CmdVolumeLs(args ...string) error {
|
||||
cmd := Cli.Subcmd("volume ls", nil, "List volumes", true)
|
||||
|
||||
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names")
|
||||
flFilter := opts.NewListOpts(nil)
|
||||
cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')")
|
||||
|
||||
cmd.Require(flag.Exact, 0)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
volFilterArgs := filters.Args{}
|
||||
for _, f := range flFilter.GetAll() {
|
||||
var err error
|
||||
volFilterArgs, err = filters.ParseFlag(f, volFilterArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
if len(volFilterArgs) > 0 {
|
||||
filterJSON, err := filters.ToParam(volFilterArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set("filters", filterJSON)
|
||||
}
|
||||
|
||||
resp, err := cli.call("GET", "/volumes?"+v.Encode(), nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var volumes types.VolumesListResponse
|
||||
if err := json.NewDecoder(resp.body).Decode(&volumes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
|
||||
if !*quiet {
|
||||
fmt.Fprintf(w, "DRIVER \tVOLUME NAME")
|
||||
fmt.Fprintf(w, "\n")
|
||||
}
|
||||
|
||||
for _, vol := range volumes.Volumes {
|
||||
if *quiet {
|
||||
fmt.Fprintln(w, vol.Name)
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name)
|
||||
}
|
||||
w.Flush()
|
||||
return nil
|
||||
}
|
||||
|
||||
// CmdVolumeInspect displays low-level information on one or more volumes.
|
||||
//
|
||||
// Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...]
|
||||
func (cli *DockerCli) CmdVolumeInspect(args ...string) error {
|
||||
cmd := Cli.Subcmd("volume inspect", []string{"VOLUME [VOLUME...]"}, "Return low-level information on a volume", true)
|
||||
tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template")
|
||||
|
||||
cmd.Require(flag.Min, 1)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
if err := cmd.Parse(args); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var tmpl *template.Template
|
||||
if *tmplStr != "" {
|
||||
var err error
|
||||
tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var status = 0
|
||||
var volumes []*types.Volume
|
||||
for _, name := range cmd.Args() {
|
||||
resp, err := cli.call("GET", "/volumes/"+name, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var volume types.Volume
|
||||
if err := json.NewDecoder(resp.body).Decode(&volume); err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
status = 1
|
||||
continue
|
||||
}
|
||||
|
||||
if tmpl == nil {
|
||||
volumes = append(volumes, &volume)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := tmpl.Execute(cli.out, &volume); err != nil {
|
||||
if err := tmpl.Execute(cli.out, &volume); err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
status = 1
|
||||
continue
|
||||
}
|
||||
}
|
||||
io.WriteString(cli.out, "\n")
|
||||
}
|
||||
|
||||
if tmpl != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(volumes, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(cli.out, bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(cli.out, "\n")
|
||||
|
||||
if status != 0 {
|
||||
return Cli.StatusError{StatusCode: status}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CmdVolumeCreate creates a new container from a given image.
|
||||
//
|
||||
// Usage: docker volume create [OPTIONS]
|
||||
func (cli *DockerCli) CmdVolumeCreate(args ...string) error {
|
||||
cmd := Cli.Subcmd("volume create", nil, "Create a volume", true)
|
||||
flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name")
|
||||
flName := cmd.String([]string{"-name"}, "", "Specify volume name")
|
||||
|
||||
flDriverOpts := opts.NewMapOpts(nil, nil)
|
||||
cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options")
|
||||
|
||||
cmd.Require(flag.Exact, 0)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
volReq := &types.VolumeCreateRequest{
|
||||
Driver: *flDriver,
|
||||
DriverOpts: flDriverOpts.GetAll(),
|
||||
}
|
||||
|
||||
if *flName != "" {
|
||||
volReq.Name = *flName
|
||||
}
|
||||
|
||||
resp, err := cli.call("POST", "/volumes/create", volReq, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var vol types.Volume
|
||||
if err := json.NewDecoder(resp.body).Decode(&vol); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(cli.out, "%s\n", vol.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CmdVolumeRm removes one or more containers.
|
||||
//
|
||||
// Usage: docker volume rm VOLUME [VOLUME...]
|
||||
func (cli *DockerCli) CmdVolumeRm(args ...string) error {
|
||||
cmd := Cli.Subcmd("volume rm", []string{"VOLUME [VOLUME...]"}, "Remove a volume", true)
|
||||
cmd.Require(flag.Min, 1)
|
||||
cmd.ParseFlags(args, true)
|
||||
|
||||
var status = 0
|
||||
for _, name := range cmd.Args() {
|
||||
_, err := cli.call("DELETE", "/volumes/"+name, nil, nil)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.err, "%s\n", err)
|
||||
status = 1
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(cli.out, "%s\n", name)
|
||||
}
|
||||
|
||||
if status != 0 {
|
||||
return Cli.StatusError{StatusCode: status}
|
||||
}
|
||||
return nil
|
||||
}
|
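CmdVolumeCreate above POSTs a JSON document to /volumes/create. A rough sketch of the payload shape; the struct below is a local stand-in for types.VolumeCreateRequest, and the volume name and driver option are invented examples:

package main

import (
	"encoding/json"
	"fmt"
)

// volumeCreateRequest is a local stand-in for types.VolumeCreateRequest.
type volumeCreateRequest struct {
	Name       string            `json:"Name"`
	Driver     string            `json:"Driver"`
	DriverOpts map[string]string `json:"DriverOpts"`
}

func main() {
	req := volumeCreateRequest{
		Name:   "example-volume", // hypothetical name
		Driver: "local",
		DriverOpts: map[string]string{
			"type": "tmpfs", // hypothetical driver option
		},
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b)) // the body sent to /volumes/create
}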
35 vendor/github.com/docker/docker/api/client/wait.go generated vendored
@@ -1,35 +0,0 @@
package client

import (
	"fmt"

	Cli "github.com/docker/docker/cli"
	flag "github.com/docker/docker/pkg/mflag"
)

// CmdWait blocks until a container stops, then prints its exit code.
//
// If more than one container is specified, this will wait synchronously on each container.
//
// Usage: docker wait CONTAINER [CONTAINER...]
func (cli *DockerCli) CmdWait(args ...string) error {
	cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["wait"].Description, true)
	cmd.Require(flag.Min, 1)

	cmd.ParseFlags(args, true)

	var errNames []string
	for _, name := range cmd.Args() {
		status, err := waitForExit(cli, name)
		if err != nil {
			fmt.Fprintf(cli.err, "%s\n", err)
			errNames = append(errNames, name)
		} else {
			fmt.Fprintf(cli.out, "%d\n", status)
		}
	}
	if len(errNames) > 0 {
		return fmt.Errorf("Error: failed to wait containers: %v", errNames)
	}
	return nil
}

145 vendor/github.com/docker/docker/api/common.go generated vendored
@@ -1,145 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"mime"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/version"
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
// Common constants for daemon and client.
|
||||
const (
|
||||
// Version of Current REST API
|
||||
Version version.Version = "1.22"
|
||||
|
||||
// MinVersion represents Minimun REST API version supported
|
||||
MinVersion version.Version = "1.12"
|
||||
|
||||
// DefaultDockerfileName is the Default filename with Docker commands, read by docker build
|
||||
DefaultDockerfileName string = "Dockerfile"
|
||||
)
|
||||
|
||||
// byPortInfo is a temporary type used to sort types.Port by its fields
|
||||
type byPortInfo []types.Port
|
||||
|
||||
func (r byPortInfo) Len() int { return len(r) }
|
||||
func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r byPortInfo) Less(i, j int) bool {
|
||||
if r[i].PrivatePort != r[j].PrivatePort {
|
||||
return r[i].PrivatePort < r[j].PrivatePort
|
||||
}
|
||||
|
||||
if r[i].IP != r[j].IP {
|
||||
return r[i].IP < r[j].IP
|
||||
}
|
||||
|
||||
if r[i].PublicPort != r[j].PublicPort {
|
||||
return r[i].PublicPort < r[j].PublicPort
|
||||
}
|
||||
|
||||
return r[i].Type < r[j].Type
|
||||
}
|
||||
|
||||
// DisplayablePorts returns formatted string representing open ports of container
|
||||
// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp"
|
||||
// it's used by command 'docker ps'
|
||||
func DisplayablePorts(ports []types.Port) string {
|
||||
type portGroup struct {
|
||||
first int
|
||||
last int
|
||||
}
|
||||
groupMap := make(map[string]*portGroup)
|
||||
var result []string
|
||||
var hostMappings []string
|
||||
var groupMapKeys []string
|
||||
sort.Sort(byPortInfo(ports))
|
||||
for _, port := range ports {
|
||||
current := port.PrivatePort
|
||||
portKey := port.Type
|
||||
if port.IP != "" {
|
||||
if port.PublicPort != current {
|
||||
hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
|
||||
continue
|
||||
}
|
||||
portKey = fmt.Sprintf("%s/%s", port.IP, port.Type)
|
||||
}
|
||||
group := groupMap[portKey]
|
||||
|
||||
if group == nil {
|
||||
groupMap[portKey] = &portGroup{first: current, last: current}
|
||||
// record order that groupMap keys are created
|
||||
groupMapKeys = append(groupMapKeys, portKey)
|
||||
continue
|
||||
}
|
||||
if current == (group.last + 1) {
|
||||
group.last = current
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, formGroup(portKey, group.first, group.last))
|
||||
groupMap[portKey] = &portGroup{first: current, last: current}
|
||||
}
|
||||
for _, portKey := range groupMapKeys {
|
||||
g := groupMap[portKey]
|
||||
result = append(result, formGroup(portKey, g.first, g.last))
|
||||
}
|
||||
result = append(result, hostMappings...)
|
||||
return strings.Join(result, ", ")
|
||||
}
|
||||
|
||||
func formGroup(key string, start, last int) string {
|
||||
parts := strings.Split(key, "/")
|
||||
groupType := parts[0]
|
||||
var ip string
|
||||
if len(parts) > 1 {
|
||||
ip = parts[0]
|
||||
groupType = parts[1]
|
||||
}
|
||||
group := strconv.Itoa(start)
|
||||
if start != last {
|
||||
group = fmt.Sprintf("%s-%d", group, last)
|
||||
}
|
||||
if ip != "" {
|
||||
group = fmt.Sprintf("%s:%s->%s", ip, group, group)
|
||||
}
|
||||
return fmt.Sprintf("%s/%s", group, groupType)
|
||||
}
|
||||
|
||||
// MatchesContentType validates the content type against the expected one
|
||||
func MatchesContentType(contentType, expectedType string) bool {
|
||||
mimetype, _, err := mime.ParseMediaType(contentType)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error parsing media type: %s error: %v", contentType, err)
|
||||
}
|
||||
return err == nil && mimetype == expectedType
|
||||
}
|
||||
|
||||
// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
|
||||
// otherwise generates a new one
|
||||
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
|
||||
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
|
||||
if err == libtrust.ErrKeyFileDoesNotExist {
|
||||
trustKey, err = libtrust.GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error generating key: %s", err)
|
||||
}
|
||||
if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil {
|
||||
return nil, fmt.Errorf("Error saving key file: %s", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
|
||||
}
|
||||
return trustKey, nil
|
||||
}
|
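MatchesContentType above compares only the media type and ignores parameters such as charset. The same check in isolation, using only the standard mime package; matchesContentType is our local name for this sketch, not the vendored function:

package main

import (
	"fmt"
	"mime"
)

// matchesContentType mirrors the comparison in api.MatchesContentType:
// parameters like "; charset=utf-8" are stripped before comparing.
func matchesContentType(contentType, expected string) bool {
	mimetype, _, err := mime.ParseMediaType(contentType)
	return err == nil && mimetype == expected
}

func main() {
	fmt.Println(matchesContentType("application/json; charset=utf-8", "application/json")) // true
	fmt.Println(matchesContentType("text/plain", "application/json"))                      // false
}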
340 vendor/github.com/docker/docker/api/common_test.go generated vendored
@@ -1,340 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"os"
|
||||
)
|
||||
|
||||
type ports struct {
|
||||
ports []types.Port
|
||||
expected string
|
||||
}
|
||||
|
||||
// DisplayablePorts
|
||||
func TestDisplayablePorts(t *testing.T) {
|
||||
cases := []ports{
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
PrivatePort: 9988,
|
||||
Type: "tcp",
|
||||
},
|
||||
},
|
||||
"9988/tcp"},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
PrivatePort: 9988,
|
||||
Type: "udp",
|
||||
},
|
||||
},
|
||||
"9988/udp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
IP: "0.0.0.0",
|
||||
PrivatePort: 9988,
|
||||
Type: "tcp",
|
||||
},
|
||||
},
|
||||
"0.0.0.0:0->9988/tcp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
PrivatePort: 9988,
|
||||
PublicPort: 8899,
|
||||
Type: "tcp",
|
||||
},
|
||||
},
|
||||
"9988/tcp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
IP: "4.3.2.1",
|
||||
PrivatePort: 9988,
|
||||
PublicPort: 8899,
|
||||
Type: "tcp",
|
||||
},
|
||||
},
|
||||
"4.3.2.1:8899->9988/tcp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
IP: "4.3.2.1",
|
||||
PrivatePort: 9988,
|
||||
PublicPort: 9988,
|
||||
Type: "tcp",
|
||||
},
|
||||
},
|
||||
"4.3.2.1:9988->9988/tcp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
PrivatePort: 9988,
|
||||
Type: "udp",
|
||||
}, {
|
||||
PrivatePort: 9988,
|
||||
Type: "udp",
|
||||
},
|
||||
},
|
||||
"9988/udp, 9988/udp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
IP: "1.2.3.4",
|
||||
PublicPort: 9998,
|
||||
PrivatePort: 9998,
|
||||
Type: "udp",
|
||||
}, {
|
||||
IP: "1.2.3.4",
|
||||
PublicPort: 9999,
|
||||
PrivatePort: 9999,
|
||||
Type: "udp",
|
||||
},
|
||||
},
|
||||
"1.2.3.4:9998-9999->9998-9999/udp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
IP: "1.2.3.4",
|
||||
PublicPort: 8887,
|
||||
PrivatePort: 9998,
|
||||
Type: "udp",
|
||||
}, {
|
||||
IP: "1.2.3.4",
|
||||
PublicPort: 8888,
|
||||
PrivatePort: 9999,
|
||||
Type: "udp",
|
||||
},
|
||||
},
|
||||
"1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
PrivatePort: 9998,
|
||||
Type: "udp",
|
||||
}, {
|
||||
PrivatePort: 9999,
|
||||
Type: "udp",
|
||||
},
|
||||
},
|
||||
"9998-9999/udp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
IP: "1.2.3.4",
|
||||
PrivatePort: 6677,
|
||||
PublicPort: 7766,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
PrivatePort: 9988,
|
||||
PublicPort: 8899,
|
||||
Type: "udp",
|
||||
},
|
||||
},
|
||||
"9988/udp, 1.2.3.4:7766->6677/tcp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
IP: "1.2.3.4",
|
||||
PrivatePort: 9988,
|
||||
PublicPort: 8899,
|
||||
Type: "udp",
|
||||
}, {
|
||||
IP: "1.2.3.4",
|
||||
PrivatePort: 9988,
|
||||
PublicPort: 8899,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
IP: "4.3.2.1",
|
||||
PrivatePort: 2233,
|
||||
PublicPort: 3322,
|
||||
Type: "tcp",
|
||||
},
|
||||
},
|
||||
"4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
PrivatePort: 9988,
|
||||
PublicPort: 8899,
|
||||
Type: "udp",
|
||||
}, {
|
||||
IP: "1.2.3.4",
|
||||
PrivatePort: 6677,
|
||||
PublicPort: 7766,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
IP: "4.3.2.1",
|
||||
PrivatePort: 2233,
|
||||
PublicPort: 3322,
|
||||
Type: "tcp",
|
||||
},
|
||||
},
|
||||
"9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp",
|
||||
},
|
||||
{
|
||||
[]types.Port{
|
||||
{
|
||||
PrivatePort: 80,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
PrivatePort: 1024,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
PrivatePort: 80,
|
||||
Type: "udp",
|
||||
}, {
|
||||
PrivatePort: 1024,
|
||||
Type: "udp",
|
||||
}, {
|
||||
IP: "1.1.1.1",
|
||||
PublicPort: 80,
|
||||
PrivatePort: 1024,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
IP: "1.1.1.1",
|
||||
PublicPort: 80,
|
||||
PrivatePort: 1024,
|
||||
Type: "udp",
|
||||
}, {
|
||||
IP: "1.1.1.1",
|
||||
PublicPort: 1024,
|
||||
PrivatePort: 80,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
IP: "1.1.1.1",
|
||||
PublicPort: 1024,
|
||||
PrivatePort: 80,
|
||||
Type: "udp",
|
||||
}, {
|
||||
IP: "2.1.1.1",
|
||||
PublicPort: 80,
|
||||
PrivatePort: 1024,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
IP: "2.1.1.1",
|
||||
PublicPort: 80,
|
||||
PrivatePort: 1024,
|
||||
Type: "udp",
|
||||
}, {
|
||||
IP: "2.1.1.1",
|
||||
PublicPort: 1024,
|
||||
PrivatePort: 80,
|
||||
Type: "tcp",
|
||||
}, {
|
||||
IP: "2.1.1.1",
|
||||
PublicPort: 1024,
|
||||
PrivatePort: 80,
|
||||
Type: "udp",
|
||||
},
|
||||
},
|
||||
"80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp",
|
||||
},
|
||||
}
|
||||
|
||||
for _, port := range cases {
|
||||
actual := DisplayablePorts(port.ports)
|
||||
if port.expected != actual {
|
||||
t.Fatalf("Expected %s, got %s.", port.expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MatchesContentType
|
||||
func TestJsonContentType(t *testing.T) {
|
||||
if !MatchesContentType("application/json", "application/json") {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if !MatchesContentType("application/json; charset=utf-8", "application/json") {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if MatchesContentType("dockerapplication/json", "application/json") {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
// LoadOrCreateTrustKey
|
||||
func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
|
||||
tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpKeyFolderPath)
|
||||
|
||||
tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil {
|
||||
t.Fatalf("expected an error, got nothing.")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) {
|
||||
tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpKeyFolderPath)
|
||||
|
||||
// Without the need to create the folder hierarchy
|
||||
tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile")
|
||||
|
||||
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
|
||||
t.Fatalf("expected a new key file, got : %v and %v", err, key)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(tmpKeyFile); err != nil {
|
||||
t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
|
||||
}
|
||||
|
||||
// With the need to create the folder hierarchy as tmpKeyFie is in a path
|
||||
// where some folder do not exists.
|
||||
tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile")
|
||||
|
||||
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
|
||||
t.Fatalf("expected a new key file, got : %v and %v", err, key)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(tmpKeyFile); err != nil {
|
||||
t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
|
||||
}
|
||||
|
||||
// With no path at all
|
||||
defer os.Remove("keyfile")
|
||||
if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil {
|
||||
t.Fatalf("expected a new key file, got : %v and %v", err, key)
|
||||
}
|
||||
|
||||
if _, err := os.Stat("keyfile"); err != nil {
|
||||
t.Fatalf("Expected to find a file keyfile, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
|
||||
tmpKeyFile := filepath.Join("fixtures", "keyfile")
|
||||
|
||||
if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
|
||||
t.Fatalf("expected a key file, got : %v and %v", err, key)
|
||||
}
|
||||
}
|
7
vendor/github.com/docker/docker/api/fixtures/keyfile
generated
vendored
7
vendor/github.com/docker/docker/api/fixtures/keyfile
generated
vendored
@ -1,7 +0,0 @@
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY
|
||||
|
||||
MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49
|
||||
AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky
|
||||
NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA==
|
||||
-----END EC PRIVATE KEY-----
|
73
vendor/github.com/docker/docker/api/server/httputils/form.go
generated
vendored
73
vendor/github.com/docker/docker/api/server/httputils/form.go
generated
vendored
@ -1,73 +0,0 @@
package httputils

import (
	"fmt"
	"net/http"
	"path/filepath"
	"strconv"
	"strings"
)

// BoolValue transforms a form value in different formats into a boolean type.
func BoolValue(r *http.Request, k string) bool {
	s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
	return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
}

// BoolValueOrDefault returns the default bool passed if the query param is
// missing, otherwise it's just a proxy to BoolValue above.
func BoolValueOrDefault(r *http.Request, k string, d bool) bool {
	if _, ok := r.Form[k]; !ok {
		return d
	}
	return BoolValue(r, k)
}

// Int64ValueOrZero parses a form value into an int64 type.
// It returns 0 if the parsing fails.
func Int64ValueOrZero(r *http.Request, k string) int64 {
	val, err := Int64ValueOrDefault(r, k, 0)
	if err != nil {
		return 0
	}
	return val
}

// Int64ValueOrDefault parses a form value into an int64 type. If there is an
// error, it returns the error. If there is no value, it returns the default value.
func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) {
	if r.Form.Get(field) != "" {
		value, err := strconv.ParseInt(r.Form.Get(field), 10, 64)
		if err != nil {
			return value, err
		}
		return value, nil
	}
	return def, nil
}

// ArchiveOptions stores archive information for different operations.
type ArchiveOptions struct {
	Name string
	Path string
}

// ArchiveFormValues parses form values and turns them into ArchiveOptions.
// It fails if the archive name and path are not in the request.
func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) {
	if err := ParseForm(r); err != nil {
		return ArchiveOptions{}, err
	}

	name := vars["name"]
	path := filepath.FromSlash(r.Form.Get("path"))

	switch {
	case name == "":
		return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty")
	case path == "":
		return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty")
	}

	return ArchiveOptions{name, path}, nil
}
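To show how these form helpers are meant to be consumed, here is a minimal sketch of a standard net/http handler calling them. The handler name, route, and the query parameters "all", "limit", and "size" are illustrative assumptions, not part of the vendored code.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/docker/api/server/httputils"
)

// listHandler is a hypothetical handler that reads optional query parameters
// using the helpers from form.go above.
func listHandler(w http.ResponseWriter, r *http.Request) {
	// ParseForm (from httputils.go) tolerates missing or odd Content-Type headers.
	if err := httputils.ParseForm(r); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	all := httputils.BoolValue(r, "all")            // "1", "true", "yes" => true
	limit := httputils.Int64ValueOrZero(r, "limit") // unparsable => 0
	size, err := httputils.Int64ValueOrDefault(r, "size", -1)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	fmt.Fprintf(w, "all=%v limit=%d size=%d\n", all, limit, size)
}

func main() {
	http.HandleFunc("/containers/json", listHandler)
	http.ListenAndServe(":8080", nil) // illustrative listen address
}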
105
vendor/github.com/docker/docker/api/server/httputils/form_test.go
generated
vendored
@ -1,105 +0,0 @@
package httputils

import (
	"net/http"
	"net/url"
	"testing"
)

func TestBoolValue(t *testing.T) {
	cases := map[string]bool{
		"":      false,
		"0":     false,
		"no":    false,
		"false": false,
		"none":  false,
		"1":     true,
		"yes":   true,
		"true":  true,
		"one":   true,
		"100":   true,
	}

	for c, e := range cases {
		v := url.Values{}
		v.Set("test", c)
		r, _ := http.NewRequest("POST", "", nil)
		r.Form = v

		a := BoolValue(r, "test")
		if a != e {
			t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
		}
	}
}

func TestBoolValueOrDefault(t *testing.T) {
	r, _ := http.NewRequest("GET", "", nil)
	if !BoolValueOrDefault(r, "queryparam", true) {
		t.Fatal("Expected to get true default value, got false")
	}

	v := url.Values{}
	v.Set("param", "")
	r, _ = http.NewRequest("GET", "", nil)
	r.Form = v
	if BoolValueOrDefault(r, "param", true) {
		t.Fatal("Expected not to get true")
	}
}

func TestInt64ValueOrZero(t *testing.T) {
	cases := map[string]int64{
		"":     0,
		"asdf": 0,
		"0":    0,
		"1":    1,
	}

	for c, e := range cases {
		v := url.Values{}
		v.Set("test", c)
		r, _ := http.NewRequest("POST", "", nil)
		r.Form = v

		a := Int64ValueOrZero(r, "test")
		if a != e {
			t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
		}
	}
}

func TestInt64ValueOrDefault(t *testing.T) {
	cases := map[string]int64{
		"":   -1,
		"-1": -1,
		"42": 42,
	}

	for c, e := range cases {
		v := url.Values{}
		v.Set("test", c)
		r, _ := http.NewRequest("POST", "", nil)
		r.Form = v

		a, err := Int64ValueOrDefault(r, "test", -1)
		if a != e {
			t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a)
		}
		if err != nil {
			t.Fatalf("Error should be nil, but received: %s", err)
		}
	}
}

func TestInt64ValueOrDefaultWithError(t *testing.T) {
	v := url.Values{}
	v.Set("test", "invalid")
	r, _ := http.NewRequest("POST", "", nil)
	r.Form = v

	_, err := Int64ValueOrDefault(r, "test", -1)
	if err == nil {
		t.Fatalf("Expected an error.")
	}
}
180
vendor/github.com/docker/docker/api/server/httputils/httputils.go
generated
vendored
@ -1,180 +0,0 @@
package httputils

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"

	"golang.org/x/net/context"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/registry/api/errcode"
	"github.com/docker/docker/api"
	"github.com/docker/docker/pkg/version"
	"github.com/docker/docker/utils"
)

// APIVersionKey is the client's requested API version.
const APIVersionKey = "api-version"

// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints.
// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error

// HijackConnection interrupts the http response writer to get the
// underlying connection and operate with it.
func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
	conn, _, err := w.(http.Hijacker).Hijack()
	if err != nil {
		return nil, nil, err
	}
	// Flush the options to make sure the client sets the raw mode
	conn.Write([]byte{})
	return conn, conn, nil
}

// CloseStreams ensures that a list of http streams are properly closed.
func CloseStreams(streams ...interface{}) {
	for _, stream := range streams {
		if tcpc, ok := stream.(interface {
			CloseWrite() error
		}); ok {
			tcpc.CloseWrite()
		} else if closer, ok := stream.(io.Closer); ok {
			closer.Close()
		}
	}
}

// CheckForJSON makes sure that the request's Content-Type is application/json.
func CheckForJSON(r *http.Request) error {
	ct := r.Header.Get("Content-Type")

	// No Content-Type header is ok as long as there's no Body
	if ct == "" {
		if r.Body == nil || r.ContentLength == 0 {
			return nil
		}
	}

	// Otherwise it better be json
	if api.MatchesContentType(ct, "application/json") {
		return nil
	}
	return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct)
}

// ParseForm ensures the request form is parsed even with invalid content types.
// If we don't do this, a POST method without Content-Type (even with an empty body) will fail.
func ParseForm(r *http.Request) error {
	if r == nil {
		return nil
	}
	if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
		return err
	}
	return nil
}

// ParseMultipartForm ensures the request form is parsed, even with invalid content types.
func ParseMultipartForm(r *http.Request) error {
	if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") {
		return err
	}
	return nil
}

// WriteError decodes a specific docker error and sends it in the response.
func WriteError(w http.ResponseWriter, err error) {
	if err == nil || w == nil {
		logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling")
		return
	}

	statusCode := http.StatusInternalServerError
	errMsg := err.Error()

	// Based on the type of error we get we need to process things
	// slightly differently to extract the error message.
	// In the 'errcode.*' cases there are two different types of
	// error that could be returned. errcode.ErrorCode is the base
	// type of error object - it is just an 'int' that can then be
	// used as the look-up key to find the message. errcode.Error
	// extends errcode.ErrorCode by adding error-instance-specific
	// data, like 'details' or variable strings to be inserted into
	// the message.
	//
	// Ideally, we should just be able to call err.Error() for all
	// cases but the errcode package doesn't support that yet.
	//
	// Additionally, in both errcode cases, there might be an http
	// status code associated with it, and if so use it.
	switch err.(type) {
	case errcode.ErrorCode:
		daError, _ := err.(errcode.ErrorCode)
		statusCode = daError.Descriptor().HTTPStatusCode
		errMsg = daError.Message()

	case errcode.Error:
		// For reference, if you're looking for a particular error
		// then you can do something like:
		//   import ( derr "github.com/docker/docker/errors" )
		//   if daError.ErrorCode() == derr.ErrorCodeNoSuchContainer { ... }

		daError, _ := err.(errcode.Error)
		statusCode = daError.ErrorCode().Descriptor().HTTPStatusCode
		errMsg = daError.Message

	default:
		// This part will be removed once we've
		// converted everything over to use the errcode package.

		// FIXME: this is brittle and should not be necessary.
		// If we need to differentiate between different possible error types,
		// we should create appropriate error types with clearly defined meaning.
		errStr := strings.ToLower(err.Error())
		for keyword, status := range map[string]int{
			"not found":             http.StatusNotFound,
			"no such":               http.StatusNotFound,
			"bad parameter":         http.StatusBadRequest,
			"conflict":              http.StatusConflict,
			"impossible":            http.StatusNotAcceptable,
			"wrong login/password":  http.StatusUnauthorized,
			"hasn't been activated": http.StatusForbidden,
		} {
			if strings.Contains(errStr, keyword) {
				statusCode = status
				break
			}
		}
	}

	if statusCode == 0 {
		statusCode = http.StatusInternalServerError
	}

	logrus.WithFields(logrus.Fields{"statusCode": statusCode, "err": utils.GetErrorMessage(err)}).Error("HTTP Error")
	http.Error(w, errMsg, statusCode)
}

// WriteJSON writes the value v to the http response stream as json with standard json encoding.
func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	return json.NewEncoder(w).Encode(v)
}

// VersionFromContext returns an API version from the context using APIVersionKey.
// It panics if the context value does not have version.Version type.
func VersionFromContext(ctx context.Context) (ver version.Version) {
	if ctx == nil {
		return
	}
	val := ctx.Value(APIVersionKey)
	if val == nil {
		return
	}
	return val.(version.Version)
}
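As a rough illustration of how an endpoint written against the APIFunc signature uses these helpers, here is a minimal sketch. The getPing function, the pingResponse type, the route, and the listen address are assumptions made up for the example; only the httputils calls come from the file above.

package main

import (
	"net/http"

	"github.com/docker/docker/api/server/httputils"
	"golang.org/x/net/context"
)

// pingResponse is an illustrative payload type, not part of the vendored API.
type pingResponse struct {
	Status     string `json:"status"`
	APIVersion string `json:"api_version"`
}

// getPing is a hypothetical endpoint written against the APIFunc signature above.
func getPing(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}
	resp := pingResponse{
		Status:     "OK",
		APIVersion: string(httputils.VersionFromContext(ctx)), // empty if no version was stored in ctx
	}
	// WriteJSON sets the Content-Type header, writes the status code, and encodes the value.
	return httputils.WriteJSON(w, http.StatusOK, resp)
}

func main() {
	http.HandleFunc("/_ping", func(w http.ResponseWriter, r *http.Request) {
		if err := getPing(context.Background(), w, r, nil); err != nil {
			// WriteError maps the error to an HTTP status code and sends it.
			httputils.WriteError(w, err)
		}
	})
	http.ListenAndServe(":8080", nil) // illustrative listen address
}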
154
vendor/github.com/docker/docker/api/server/middleware.go
generated
vendored
@ -1,154 +0,0 @@
package server

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"runtime"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/api"
	"github.com/docker/docker/api/server/httputils"
	"github.com/docker/docker/dockerversion"
	"github.com/docker/docker/errors"
	"github.com/docker/docker/pkg/version"
	"golang.org/x/net/context"
)

// middleware is an adapter to allow the use of ordinary functions as Docker API filters.
// Any function that has the appropriate signature can be registered as a middleware.
type middleware func(handler httputils.APIFunc) httputils.APIFunc

// loggingMiddleware logs each request when logging is enabled.
func (s *Server) loggingMiddleware(handler httputils.APIFunc) httputils.APIFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		if s.cfg.Logging {
			logrus.Infof("%s %s", r.Method, r.RequestURI)
		}
		return handler(ctx, w, r, vars)
	}
}

// debugRequestMiddleware dumps the request to the logger.
// This is implemented separately from `loggingMiddleware` so that we don't have to
// check the logging level or have httputil.DumpRequest called on each request.
// Instead the middleware is only injected when the logging level is set to debug.
func (s *Server) debugRequestMiddleware(handler httputils.APIFunc) httputils.APIFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		if s.cfg.Logging && r.Method == "POST" {
			if err := httputils.CheckForJSON(r); err == nil {
				var buf bytes.Buffer
				if _, err := buf.ReadFrom(r.Body); err == nil {
					r.Body.Close()
					r.Body = ioutil.NopCloser(&buf)
					var postForm map[string]interface{}
					if err := json.Unmarshal(buf.Bytes(), &postForm); err == nil {
						if _, exists := postForm["password"]; exists {
							postForm["password"] = "*****"
						}
						logrus.Debugf("form data: %q", postForm)
					}
				}
			}
		}
		return handler(ctx, w, r, vars)
	}
}

// userAgentMiddleware checks the User-Agent header looking for a valid docker client spec.
func (s *Server) userAgentMiddleware(handler httputils.APIFunc) httputils.APIFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
			dockerVersion := version.Version(s.cfg.Version)

			userAgent := strings.Split(r.Header.Get("User-Agent"), "/")

			// v1.20 onwards includes the GOOS of the client after the version
			// such as Docker/1.7.0 (linux)
			if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") {
				userAgent[1] = strings.Split(userAgent[1], " ")[0]
			}

			if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
				logrus.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
			}
		}
		return handler(ctx, w, r, vars)
	}
}

// corsMiddleware sets the CORS header expectations in the server.
func (s *Server) corsMiddleware(handler httputils.APIFunc) httputils.APIFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		// If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*";
		// otherwise, all header values will be passed to the HTTP handler.
		corsHeaders := s.cfg.CorsHeaders
		if corsHeaders == "" && s.cfg.EnableCors {
			corsHeaders = "*"
		}

		if corsHeaders != "" {
			writeCorsHeaders(w, r, corsHeaders)
		}
		return handler(ctx, w, r, vars)
	}
}

// versionMiddleware checks the api version requirements before passing the request to the server handler.
func versionMiddleware(handler httputils.APIFunc) httputils.APIFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		apiVersion := version.Version(vars["version"])
		if apiVersion == "" {
			apiVersion = api.Version
		}

		if apiVersion.GreaterThan(api.Version) {
			return errors.ErrorCodeNewerClientVersion.WithArgs(apiVersion, api.Version)
		}
		if apiVersion.LessThan(api.MinVersion) {
			return errors.ErrorCodeOldClientVersion.WithArgs(apiVersion, api.Version)
		}

		w.Header().Set("Server", "Docker/"+dockerversion.Version+" ("+runtime.GOOS+")")
		ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion)
		return handler(ctx, w, r, vars)
	}
}

// handleWithGlobalMiddlewares wraps the handler function for a request with
// the server's global middlewares. The order of the middlewares is backwards,
// meaning that the first in the list will be evaluated last.
//
// Example: handleWithGlobalMiddlewares(s.getContainersName)
//
//	s.loggingMiddleware(
//		s.userAgentMiddleware(
//			s.corsMiddleware(
//				versionMiddleware(s.getContainersName)
//			)
//		)
//	)
func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc {
	middlewares := []middleware{
		versionMiddleware,
		s.corsMiddleware,
		s.userAgentMiddleware,
		s.loggingMiddleware,
	}

	// Only want this on debug level.
	// This is separate from the logging middleware so that we can do this check here once,
	// rather than for each request.
	if logrus.GetLevel() == logrus.DebugLevel {
		middlewares = append(middlewares, s.debugRequestMiddleware)
	}

	h := handler
	for _, m := range middlewares {
		h = m(h)
	}
	return h
}
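To make the adapter pattern above concrete, here is a minimal sketch of a custom middleware written in the same style, outside the Server type. The middleware name, the context key, the route, and the listen address are illustrative assumptions; only the httputils types and functions come from the vendored files above.

package main

import (
	"log"
	"net/http"

	"github.com/docker/docker/api/server/httputils"
	"golang.org/x/net/context"
)

// requestIDMiddleware is a hypothetical middleware in the same adapter style:
// it takes an APIFunc and returns a wrapped APIFunc.
func requestIDMiddleware(handler httputils.APIFunc) httputils.APIFunc {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		// "request-id" is an illustrative context key, not one used by the vendored server.
		ctx = context.WithValue(ctx, "request-id", r.Header.Get("X-Request-Id"))
		return handler(ctx, w, r, vars)
	}
}

func main() {
	// A trivial endpoint to wrap.
	var hello httputils.APIFunc = func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		return httputils.WriteJSON(w, http.StatusOK, map[string]string{"hello": "world"})
	}

	// Wrapping mirrors handleWithGlobalMiddlewares: each middleware returns a new APIFunc.
	wrapped := requestIDMiddleware(hello)

	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		if err := wrapped(context.Background(), w, r, nil); err != nil {
			httputils.WriteError(w, err)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil)) // illustrative listen address
}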
Some files were not shown because too many files have changed in this diff.